import collections
import functools
import itertools
import operator
from contextlib import suppress
from typing import Any, Dict, List
import numpy as np
import toolz
from cached_property import cached_property
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis import util
from ibis.expr.schema import HasSchema, Schema
from ibis.expr.signature import Annotable
from ibis.expr.signature import Argument as Arg
def _safe_repr(x, memo=None):
return x._repr(memo=memo) if isinstance(x, (ir.Expr, Node)) else repr(x)
# TODO: move to analysis
def distinct_roots(*expressions):
roots = toolz.concat(expr.op().root_tables() for expr in expressions)
return list(toolz.unique(roots))
class Node(Annotable):
__slots__ = '_expr_cached', '_hash'
def __repr__(self):
return self._repr()
def _repr(self, memo=None):
if memo is None:
from ibis.expr.format import FormatMemo
memo = FormatMemo()
opname = type(self).__name__
pprint_args = []
def _pp(x):
return _safe_repr(x, memo=memo)
for x in self.args:
if isinstance(x, (tuple, list)):
pp = repr(list(map(_pp, x)))
else:
pp = _pp(x)
pprint_args.append(pp)
return '{}({})'.format(opname, ', '.join(pprint_args))
def __getstate__(self) -> Dict[str, Any]:
"""The attributes _expr_cached and _hash are
used as caches; they can be excluded from
serialization without affecting correctness.
Excluding _expr_cached and _hash from serialization
will allow the serialized bytes to be the same for
equivalent Node objets.
Returns
-------
Dict[str, Any]
A dictionary storing the objects attributes.
"""
excluded_slots = {'_expr_cached', '_hash'}
return {
slot: getattr(self, slot)
for slot in self.__slots__
if slot not in excluded_slots
}
def __setstate__(self, state: Dict[str, Any]) -> None:
"""
Parameters
----------
state: Dict[str, Any]
            A dictionary storing the object's attributes.
"""
for slot in state:
setattr(self, slot, state[slot])
@property
def inputs(self):
return tuple(self.args)
def blocks(self):
        # The contents of this node are referentially distinct and may not
        # be analyzed more deeply
return False
def flat_args(self):
for arg in self.args:
if not isinstance(arg, str) and isinstance(
arg, collections.abc.Iterable
):
for x in arg:
yield x
else:
yield arg
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(
(type(self),)
+ tuple(
element.op() if isinstance(element, ir.Expr) else element
for element in self.flat_args()
)
)
return self._hash
def __eq__(self, other):
return self.equals(other)
def equals(self, other, cache=None):
if cache is None:
cache = {}
key = self, other
try:
return cache[key]
except KeyError:
cache[key] = result = self is other or (
type(self) == type(other)
and all_equal(self.args, other.args, cache=cache)
)
return result
def compatible_with(self, other):
return self.equals(other)
def is_ancestor(self, other):
if isinstance(other, ir.Expr):
other = other.op()
return self.equals(other)
def to_expr(self):
if not hasattr(self, '_expr_cached'):
self._expr_cached = self._make_expr()
return self._expr_cached
def _make_expr(self):
klass = self.output_type()
return klass(self)
def output_type(self):
"""
This function must resolve the output type of the expression and return
the node wrapped in the appropriate ValueExpr type.
"""
raise NotImplementedError
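# A minimal sketch of the caching and equality behavior described above,
# using only an unbound table (no backend required). Structurally equivalent
# expressions produce ops that compare and hash equal; per __getstate__, the
# cache slots are excluded when pickling.
def _example_node_equality():
    import pickle

    import ibis

    t = ibis.table([('a', 'int64')], name='t')
    left, right = t.a.sum().op(), t.a.sum().op()
    assert left.equals(right)
    assert hash(left) == hash(right)
    # _hash and _expr_cached are dropped by __getstate__, so equivalent
    # nodes serialize to the same bytes.
    assert pickle.dumps(left) == pickle.dumps(right)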
class ValueOp(Node):
def root_tables(self):
exprs = [arg for arg in self.args if isinstance(arg, ir.Expr)]
return distinct_roots(*exprs)
def resolve_name(self):
raise com.ExpressionError(f'Expression is not named: {type(self)}')
def has_resolved_name(self):
return False
def all_equal(left, right, cache=None):
"""Check whether two objects `left` and `right` are equal.
Parameters
----------
left : Union[object, Expr, Node]
right : Union[object, Expr, Node]
cache : Optional[Dict[Tuple[Node, Node], bool]]
A dictionary indicating whether two Nodes are equal
"""
if cache is None:
cache = {}
if util.is_iterable(left):
# check that left and right are equal length iterables and that all
# of their elements are equal
return (
util.is_iterable(right)
and len(left) == len(right)
and all(
itertools.starmap(
functools.partial(all_equal, cache=cache), zip(left, right)
)
)
)
if hasattr(left, 'equals'):
return left.equals(right, cache=cache)
return left == right
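# Example usage of ``all_equal`` (a sketch): nested iterables are compared
# element-wise, and anything exposing ``.equals`` (expressions, nodes) is
# compared structurally rather than by identity.
def _example_all_equal():
    import ibis

    t = ibis.table([('a', 'int64'), ('b', 'int64')], name='t')
    assert all_equal([t.a, [t.b, 1]], [t.a, [t.b, 1]])
    assert not all_equal([t.a], [t.a, t.b])  # length mismatch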
_table_names = ('unbound_table_{:d}'.format(i) for i in itertools.count())
def genname():
return next(_table_names)
class TableNode(Node):
def get_type(self, name):
return self.schema[name]
def output_type(self):
return ir.TableExpr
def aggregate(self, this, metrics, by=None, having=None):
return Aggregation(this, metrics, by=by, having=having)
def sort_by(self, expr, sort_exprs):
return Selection(expr, [], sort_keys=sort_exprs)
def is_ancestor(self, other):
import ibis.expr.lineage as lin
if isinstance(other, ir.Expr):
other = other.op()
if self.equals(other):
return True
fn = lambda e: (lin.proceed, e.op()) # noqa: E731
expr = self.to_expr()
for child in lin.traverse(fn, expr):
if child.equals(other):
return True
return False
class TableColumn(ValueOp):
"""Selects a column from a TableExpr"""
name = Arg((str, int))
table = Arg(ir.TableExpr)
def __init__(self, name, table):
schema = table.schema()
if isinstance(name, int):
name = schema.name_at_position(name)
super().__init__(name, table)
def _validate(self):
if self.name not in self.table.schema():
raise com.IbisTypeError(
"'{}' is not a field in {}".format(
self.name, self.table.columns
)
)
def parent(self):
return self.table
def resolve_name(self):
return self.name
def has_resolved_name(self):
return True
def root_tables(self):
return self.table.op().root_tables()
def _make_expr(self):
dtype = self.table._get_type(self.name)
klass = dtype.column_type()
return klass(self, name=self.name)
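# Sketch: column selection produces a ``TableColumn`` op, and an integer
# "name" is resolved positionally against the table's schema in ``__init__``.
def _example_table_column():
    import ibis

    t = ibis.table([('a', 'int64'), ('b', 'string')], name='t')
    op = t.a.op()
    assert isinstance(op, TableColumn)
    assert op.resolve_name() == 'a'
    # Positional selection: 0 resolves to the first schema name, 'a'.
    assert TableColumn(0, t).name == 'a'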
class RowID(ValueOp):
"""The row number (an autonumeric) of the returned result."""
def output_type(self):
return dt.int64.column_type()
def resolve_name(self):
return 'rowid'
def has_resolved_name(self):
return True
def find_all_base_tables(expr, memo=None):
if memo is None:
memo = {}
node = expr.op()
if isinstance(expr, ir.TableExpr) and node.blocks():
        if node not in memo:
memo[node] = expr
return memo
for arg in expr.op().flat_args():
if isinstance(arg, ir.Expr):
find_all_base_tables(arg, memo)
return memo
class PhysicalTable(TableNode, HasSchema):
def blocks(self):
return True
class UnboundTable(PhysicalTable):
schema = Arg(sch.Schema)
name = Arg(str, default=genname)
class DatabaseTable(PhysicalTable):
name = Arg(str)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def change_name(self, new_name):
return type(self)(new_name, self.args[1], self.source)
class SQLQueryResult(TableNode, HasSchema):
"""A table sourced from the result set of a select query"""
query = Arg(rlz.noop)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def blocks(self):
return True
class TableArrayView(ValueOp):
"""
(Temporary?) Helper operation class for SQL translation (fully formed table
subqueries to be viewed as arrays)
"""
table = Arg(ir.TableExpr)
name = Arg(str)
def __init__(self, table):
schema = table.schema()
if len(schema) > 1:
raise com.ExpressionError('Table can only have a single column')
name = schema.names[0]
        super().__init__(table, name)
def _make_expr(self):
ctype = self.table._get_type(self.name)
klass = ctype.column_type()
return klass(self, name=self.name)
class UnaryOp(ValueOp):
arg = Arg(rlz.any)
class BinaryOp(ValueOp):
"""A binary operation"""
left = Arg(rlz.any)
right = Arg(rlz.any)
class Cast(ValueOp):
arg = Arg(rlz.any)
to = Arg(dt.dtype)
# see #396 for the issue preventing this
# def resolve_name(self):
# return self.args[0].get_name()
def output_type(self):
return rlz.shape_like(self.arg, dtype=self.to)
class TypeOf(UnaryOp):
output_type = rlz.shape_like('arg', dt.string)
class Negate(UnaryOp):
arg = Arg(rlz.one_of((rlz.numeric(), rlz.interval())))
output_type = rlz.typeof('arg')
class IsNull(UnaryOp):
"""Returns true if values are null
Returns
-------
isnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class NotNull(UnaryOp):
"""Returns true if values are not null
Returns
-------
notnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class ZeroIfNull(UnaryOp):
output_type = rlz.typeof('arg')
class IfNull(ValueOp):
"""Equivalent to (but perhaps implemented differently):
case().when(expr.notnull(), expr)
.else_(null_substitute_expr)
"""
arg = Arg(rlz.any)
ifnull_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIf(ValueOp):
"""Set values to NULL if they equal the null_if_expr"""
arg = Arg(rlz.any)
null_if_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIfZero(ValueOp):
"""
Set values to NULL if they equal to zero. Commonly used in cases where
divide-by-zero would produce an overflow or infinity.
Equivalent to (value == 0).ifelse(ibis.NA, value)
Returns
-------
maybe_nulled : type of caller
"""
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
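# Sketch of the NULL-handling trio above via the public column methods
# (``nullif`` and ``zeroifnull`` are assumed to be registered on value
# expressions, as they are in the ibis API).
def _example_null_handling():
    import ibis

    t = ibis.table([('a', 'int64')], name='t')
    # NullIf: NULL where a == 0, equivalent to (a == 0).ifelse(ibis.NA, a)
    safe_denominator = t.a.nullif(0)
    # ZeroIfNull: the inverse direction, 0 where a is NULL
    filled = t.a.zeroifnull()
    return safe_denominator, filled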
class IsNan(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class IsInf(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class CoalesceLike(ValueOp):
# According to Impala documentation:
# Return type: same as the initial argument value, except that integer
# values are promoted to BIGINT and floating-point values are promoted to
# DOUBLE; use CAST() when inserting into a smaller numeric column
arg = Arg(rlz.list_of(rlz.any))
def output_type(self):
first = self.arg[0]
if isinstance(first, (ir.IntegerValue, ir.FloatingValue)):
dtype = first.type().largest
else:
dtype = first.type()
# self.arg is a list of value expressions
return rlz.shape_like(self.arg, dtype)
class Coalesce(CoalesceLike):
pass
class Greatest(CoalesceLike):
pass
class Least(CoalesceLike):
pass
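# Sketch of the type-promotion rule documented in ``CoalesceLike``: with
# integer inputs the result type is promoted to the largest integer type.
def _example_coalesce_like():
    import ibis

    t = ibis.table([('a', 'int32'), ('b', 'int32')], name='t')
    expr = ibis.coalesce(t.a, t.b, 0)
    # int32 inputs are promoted: the coalesce result type is int64.
    assert expr.type() == dt.int64
    return ibis.greatest(t.a, t.b), ibis.least(t.a, t.b)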
class Abs(UnaryOp):
"""Absolute value"""
output_type = rlz.typeof('arg')
class Ceil(UnaryOp):
"""
Round up to the nearest integer value greater than or equal to this value
Returns
-------
    ceiled : type depending on input
        Decimal values: yield decimal
        Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Floor(UnaryOp):
"""
Round down to the nearest integer value less than or equal to this value
Returns
-------
    floored : type depending on input
        Decimal values: yield decimal
        Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Round(ValueOp):
arg = Arg(rlz.numeric)
digits = Arg(rlz.numeric, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
return self.arg._factory
elif self.digits is None:
return rlz.shape_like(self.arg, dt.int64)
else:
return rlz.shape_like(self.arg, dt.double)
class Clip(ValueOp):
arg = Arg(rlz.strict_numeric)
lower = Arg(rlz.strict_numeric, default=None)
upper = Arg(rlz.strict_numeric, default=None)
output_type = rlz.typeof('arg')
class BaseConvert(ValueOp):
arg = Arg(rlz.one_of([rlz.integer, rlz.string]))
from_base = Arg(rlz.integer)
to_base = Arg(rlz.integer)
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class MathUnaryOp(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
arg = self.arg
if isinstance(self.arg, ir.DecimalValue):
dtype = arg.type()
else:
dtype = dt.double
return rlz.shape_like(arg, dtype)
class ExpandingTypeMathUnaryOp(MathUnaryOp):
def output_type(self):
if not isinstance(self.arg, ir.DecimalValue):
return super().output_type()
arg = self.arg
return rlz.shape_like(arg, arg.type().largest)
class Exp(ExpandingTypeMathUnaryOp):
pass
class Sign(UnaryOp):
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class Sqrt(MathUnaryOp):
pass
class Logarithm(MathUnaryOp):
arg = Arg(rlz.strict_numeric)
class Log(Logarithm):
arg = Arg(rlz.strict_numeric)
base = Arg(rlz.strict_numeric, default=None)
class Ln(Logarithm):
"""Natural logarithm"""
class Log2(Logarithm):
"""Logarithm base 2"""
class Log10(Logarithm):
"""Logarithm base 10"""
class Degrees(ExpandingTypeMathUnaryOp):
"""Converts radians to degrees"""
arg = Arg(rlz.numeric)
class Radians(MathUnaryOp):
"""Converts degrees to radians"""
arg = Arg(rlz.numeric)
# TRIGONOMETRIC OPERATIONS
class TrigonometricUnary(MathUnaryOp):
"""Trigonometric base unary"""
arg = Arg(rlz.numeric)
class TrigonometricBinary(BinaryOp):
"""Trigonometric base binary"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.float64)
class Acos(TrigonometricUnary):
"""Returns the arc cosine of x"""
class Asin(TrigonometricUnary):
"""Returns the arc sine of x"""
class Atan(TrigonometricUnary):
"""Returns the arc tangent of x"""
class Atan2(TrigonometricBinary):
    """Returns the two-argument arc tangent of x and y"""
class Cos(TrigonometricUnary):
"""Returns the cosine of x"""
class Cot(TrigonometricUnary):
"""Returns the cotangent of x"""
class Sin(TrigonometricUnary):
"""Returns the sine of x"""
class Tan(TrigonometricUnary):
"""Returns the tangent of x"""
class StringUnaryOp(UnaryOp):
arg = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class Uppercase(StringUnaryOp):
"""Convert string to all uppercase"""
class Lowercase(StringUnaryOp):
"""Convert string to all lowercase"""
class Reverse(StringUnaryOp):
"""Reverse string"""
class Strip(StringUnaryOp):
"""Remove whitespace from left and right sides of string"""
class LStrip(StringUnaryOp):
"""Remove whitespace from left side of string"""
class RStrip(StringUnaryOp):
"""Remove whitespace from right side of string"""
class Capitalize(StringUnaryOp):
"""Return a capitalized version of input string"""
class Substring(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.integer)
length = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StrRight(ValueOp):
arg = Arg(rlz.string)
nchars = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class Repeat(ValueOp):
arg = Arg(rlz.string)
times = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class StringFind(ValueOp):
arg = Arg(rlz.string)
substr = Arg(rlz.string)
start = Arg(rlz.integer, default=None)
end = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.int64)
class Translate(ValueOp):
arg = Arg(rlz.string)
from_str = Arg(rlz.string)
to_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class LPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class RPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class FindInSet(ValueOp):
needle = Arg(rlz.string)
values = Arg(rlz.list_of(rlz.string, min_length=1))
output_type = rlz.shape_like('needle', dt.int64)
class StringJoin(ValueOp):
sep = Arg(rlz.string)
arg = Arg(rlz.list_of(rlz.string, min_length=1))
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class StartsWith(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class EndsWith(ValueOp):
arg = Arg(rlz.string)
end = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class BooleanValueOp:
pass
class FuzzySearch(ValueOp, BooleanValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.boolean)
class StringSQLLike(FuzzySearch):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
escape = Arg(str, default=None)
class StringSQLILike(StringSQLLike):
"""SQL ilike operation"""
class RegexSearch(FuzzySearch):
pass
class RegexExtract(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
index = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class RegexReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringSplit(ValueOp):
arg = Arg(rlz.string)
delimiter = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.Array(dt.string))
class StringConcat(ValueOp):
arg = Arg(rlz.list_of(rlz.string))
output_type = rlz.shape_like('arg', dt.string)
class ParseURL(ValueOp):
arg = Arg(rlz.string)
extract = Arg(
rlz.isin(
{
'PROTOCOL',
'HOST',
'PATH',
'REF',
'AUTHORITY',
'FILE',
'USERINFO',
'QUERY',
}
)
)
key = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StringLength(UnaryOp):
"""
Compute length of strings
Returns
-------
length : int32
"""
output_type = rlz.shape_like('arg', dt.int32)
class StringAscii(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
# ----------------------------------------------------------------------
class Reduction(ValueOp):
_reduction = True
class Count(Reduction):
arg = Arg((ir.ColumnExpr, ir.TableExpr))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class Arbitrary(Reduction):
arg = Arg(rlz.column(rlz.any))
how = Arg(rlz.isin({'first', 'last', 'heavy'}), default=None)
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitAnd(Reduction):
"""Aggregate bitwise AND operation.
All elements in an integer column are ANDed together. This can be used
to determine which bit flags are set on all elements.
Resources:
* `BigQuery BIT_AND
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_and>`_
* `MySQL BIT_AND
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-and>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitOr(Reduction):
"""Aggregate bitwise OR operation.
All elements in an integer column are ORed together. This can be used
to determine which bit flags are set on any element.
Resources:
* `BigQuery BIT_OR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_or>`_
* `MySQL BIT_OR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-or>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitXor(Reduction):
"""Aggregate bitwise XOR operation.
All elements in an integer column are XORed together. This can be used
as a parity checksum of element values.
Resources:
* `BigQuery BIT_XOR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_xor>`_
* `MySQL BIT_XOR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-xor>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
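# Sketch of the bitwise aggregates above, assuming the corresponding
# ``bit_and``/``bit_or``/``bit_xor`` reduction methods are registered on
# integer columns (as in the ibis API).
def _example_bit_aggregates():
    import ibis

    t = ibis.table([('flags', 'int64')], name='t')
    common_flags = t.flags.bit_and()  # bits set on *all* rows
    any_flags = t.flags.bit_or()      # bits set on *any* row
    parity = t.flags.bit_xor()        # parity checksum across rows
    return common_flags, any_flags, parity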
class Sum(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.scalar_type()
class Mean(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type()
else:
dtype = dt.float64
return dtype.scalar_type()
class Quantile(Reduction):
arg = Arg(rlz.any)
quantile = Arg(rlz.strict_numeric)
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.float64.scalar_type()
class MultiQuantile(Quantile):
arg = Arg(rlz.any)
quantile = Arg(rlz.value(dt.Array(dt.float64)))
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.Array(dt.float64).scalar_type()
class VarianceBase(Reduction):
arg = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.scalar_type()
class StandardDev(VarianceBase):
pass
class Variance(VarianceBase):
pass
class Correlation(Reduction):
"""Coefficient of correlation of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Covariance(Reduction):
"""Covariance of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Max(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Min(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class HLLCardinality(Reduction):
"""Approximate number of unique values using HyperLogLog algorithm.
Impala offers the NDV built-in function for this.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
# Impala 2.0 and higher returns a DOUBLE
# return ir.DoubleScalar
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class GroupConcat(Reduction):
arg = Arg(rlz.column(rlz.any))
sep = Arg(rlz.string, default=',')
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.string.scalar_type()
class CMSMedian(Reduction):
"""
Compute the approximate median of a set of comparable values using the
Count-Min-Sketch algorithm. Exposed in Impala using APPX_MEDIAN.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
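# Sketch of the approximate reductions above via their public column
# methods: ``approx_nunique`` lowers to HLLCardinality and ``approx_median``
# to CMSMedian.
def _example_approximate_aggregates():
    import ibis

    t = ibis.table([('a', 'int64')], name='t')
    ndv = t.a.approx_nunique()  # HyperLogLog distinct-value estimate
    med = t.a.approx_median()   # Count-Min-Sketch median estimate
    return ndv, med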
# ----------------------------------------------------------------------
# Analytic functions
class AnalyticOp(ValueOp):
pass
class WindowOp(ValueOp):
expr = Arg(rlz.noop)
window = Arg(rlz.noop)
output_type = rlz.array_like('expr')
display_argnames = False
def __init__(self, expr, window):
from ibis.expr.analysis import is_analytic
from ibis.expr.window import propagate_down_window
if not is_analytic(expr):
raise com.IbisInputError(
'Expression does not contain a valid window operation'
)
table = ir.find_base_table(expr)
if table is not None:
window = window.bind(table)
if window.max_lookback is not None:
error_msg = (
"'max lookback' windows must be ordered "
"by a timestamp column"
)
if len(window._order_by) != 1:
raise com.IbisInputError(error_msg)
order_var = window._order_by[0].op().args[0]
if not isinstance(order_var.type(), dt.Timestamp):
raise com.IbisInputError(error_msg)
expr = propagate_down_window(expr, window)
super().__init__(expr, window)
def over(self, window):
new_window = self.window.combine(window)
return WindowOp(self.expr, new_window)
@property
def inputs(self):
return self.expr.op().inputs[0], self.window
def root_tables(self):
return distinct_roots(
self.expr, *self.window._order_by, *self.window._group_by
)
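# Sketch of analytic expressions over windows: ``over`` combines windows,
# and (per ``__init__`` above) a window with ``max_lookback`` must be
# ordered by a single timestamp column.
def _example_window_op():
    import ibis

    t = ibis.table(
        [('g', 'string'), ('ts', 'timestamp'), ('a', 'int64')], name='t'
    )
    w = ibis.window(group_by=t.g, order_by=t.ts)
    return t.a.sum().over(w)  # WindowOp wrapping the Sum analytic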
class ShiftBase(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
offset = Arg(rlz.one_of((rlz.integer, rlz.interval)), default=None)
default = Arg(rlz.any, default=None)
output_type = rlz.typeof('arg')
class Lag(ShiftBase):
pass
class Lead(ShiftBase):
pass
class RankBase(AnalyticOp):
def output_type(self):
return dt.int64.column_type()
class MinRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order.
Examples
--------
values ranks
1 0
1 0
2 2
2 2
2 2
3 5
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL RANK()
arg = Arg(rlz.column(rlz.any))
class DenseRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order, ignoring duplicate values.
Examples
--------
values ranks
1 0
1 0
2 1
2 1
2 1
3 2
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL DENSE_RANK()
arg = Arg(rlz.column(rlz.any))
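# Sketch of the ranking semantics documented above, using the public
# ``rank``/``dense_rank`` column methods; both are 0-based, unlike SQL's
# 1-based RANK() and DENSE_RANK().
def _example_ranks():
    import ibis

    t = ibis.table([('values', 'int64')], name='t')
    min_rank = t.values.rank()          # 0, 0, 2, 2, 2, 5 for the example
    dense_rank = t.values.dense_rank()  # 0, 0, 1, 1, 1, 2 for the example
    return min_rank, dense_rank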
class RowNumber(RankBase):
"""
Compute row number starting from 0 after sorting by column expression
Examples
--------
>>> import ibis
    >>> t = ibis.table([('values', 'int64')])
>>> w = ibis.window(order_by=t.values)
>>> row_num = ibis.row_number().over(w)
>>> result = t[t.values, row_num.name('row_num')]
Returns
-------
row_number : Int64Column, starting from 0
"""
# Equivalent to SQL ROW_NUMBER()
class CumulativeOp(AnalyticOp):
pass
class CumulativeSum(CumulativeOp):
"""Cumulative sum. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.column_type()
class CumulativeMean(CumulativeOp):
"""Cumulative mean. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.column_type()
class CumulativeMax(CumulativeOp):
"""Cumulative max. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class CumulativeMin(CumulativeOp):
"""Cumulative min. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
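# Sketch of the cumulative ops above: the ``cum*`` column methods expand to
# these nodes, and an ordered (cumulative) window supplies the required
# ordering.
def _example_cumulative():
    import ibis

    t = ibis.table([('ts', 'timestamp'), ('a', 'int64')], name='t')
    w = ibis.cumulative_window(order_by=t.ts)
    running_total = t.a.cumsum().over(w)
    running_mean = t.a.cummean().over(w)
    return running_total, running_mean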
class PercentRank(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.shape_like('arg', dt.double)
class NTile(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
buckets = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.int64)
class FirstValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class LastValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class NthValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
nth = Arg(rlz.integer)
output_type = rlz.typeof('arg')
# ----------------------------------------------------------------------
# Distinct stuff
class Distinct(TableNode, HasSchema):
"""
Distinct is a table-level unique-ing operation.
In SQL, you might have:
SELECT DISTINCT foo
FROM table
SELECT DISTINCT foo, bar
FROM table
"""
table = Arg(ir.TableExpr)
def _validate(self):
# check whether schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.table.schema()
def blocks(self):
return True
class DistinctColumn(ValueOp):
"""
    COUNT(DISTINCT ...) is really just syntactic sugar, but we provide a
    distinct().count() nicety for users nonetheless.
    For all intents and purposes this is like Distinct, but it can be
    distinguished later for evaluation if the result should be array-like
    versus table-like, and for calling count().
"""
arg = Arg(rlz.noop)
output_type = rlz.typeof('arg')
def count(self):
"""Only valid if the distinct contains a single column"""
return CountDistinct(self.arg)
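# Sketch of the distinct niceties described above: table-level DISTINCT and
# the ``distinct().count()`` spelling of COUNT(DISTINCT ...).
def _example_distinct():
    import ibis

    t = ibis.table([('foo', 'string'), ('bar', 'int64')], name='t')
    unique_rows = t['foo', 'bar'].distinct()  # SELECT DISTINCT foo, bar
    n_unique = t.foo.distinct().count()       # COUNT(DISTINCT foo)
    # typically equivalent to t.foo.nunique()
    return unique_rows, n_unique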
class CountDistinct(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.int64.scalar_type()
# ---------------------------------------------------------------------
# Boolean reductions and semi/anti join support
class Any(ValueOp):
# Depending on the kind of input boolean array, the result might either be
# array-like (an existence-type predicate) or scalar (a reduction)
arg = Arg(rlz.column(rlz.boolean))
@property
def _reduction(self):
roots = self.arg.op().root_tables()
return len(roots) < 2
def output_type(self):
if self._reduction:
return dt.boolean.scalar_type()
else:
return dt.boolean.column_type()
def negate(self):
return NotAny(self.arg)
class All(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.scalar_like('arg')
_reduction = True
def negate(self):
return NotAll(self.arg)
class NotAny(Any):
def negate(self):
return Any(self.arg)
class NotAll(All):
def negate(self):
return All(self.arg)
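# Sketch of the dual nature of ``Any`` noted above: with a single root table
# it is a true reduction (boolean scalar); across two tables it acts as an
# existence predicate, which ibis uses for semi-join-style filtering.
def _example_any_semantics():
    import ibis

    t = ibis.table([('a', 'int64')], name='t')
    s = ibis.table([('a', 'int64')], name='s')
    as_reduction = (t.a > 0).any()        # scalar: one root table
    as_existence = t[(t.a == s.a).any()]  # filter: existence predicate
    return as_reduction, as_existence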
class CumulativeAny(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class CumulativeAll(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
# ---------------------------------------------------------------------
class TypedCaseBuilder:
__slots__ = ()
def type(self):
types = [result.type() for result in self.results]
return dt.highest_precedence(types)
def else_(self, result_expr):
"""
Specify
Returns
-------
builder : CaseBuilder
"""
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = ir.as_value_expr(result_expr)
kwargs['default'] = result_expr
# Maintain immutability
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
class SimpleCase(ValueOp):
base = Arg(rlz.any)
cases = Arg(rlz.list_of(rlz.any))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
return distinct_roots(
*itertools.chain(
[self.base],
self.cases,
self.results,
[] if self.default is None else [self.default],
)
)
    def output_type(self):
        exprs = self.results + [self.default]
        dtype = rlz.highest_precedence_dtype(exprs)
        return rlz.shape_like(self.base, dtype=dtype)
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
    def when(self, case_expr, result_expr):
        """
        Add a new case-result pair.
        Parameters
        ----------
        case : Expr
            Expression to equality-compare with base expression. Must be
            comparable with the base.
        result : Expr
            Value when the case predicate evaluates to true.
        Returns
        -------
        builder : CaseBuilder
        """
        case_expr = ir.as_value_expr(case_expr)
        result_expr = ir.as_value_expr(result_expr)
        if not rlz.comparable(self.base, case_expr):
            raise TypeError(
                'Base expression and passed case are not comparable'
            )
        cases = list(self.cases)
        cases.append(case_expr)
        results = list(self.results)
        results.append(result_expr)
        # Maintain immutability
        return type(self)(self.base, cases, results, self.default)
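# Sketch of both builder flavors: a simple case compares a base expression
# against each case value, while a searched case (below) takes boolean
# predicates directly.
def _example_case_builders():
    import ibis

    t = ibis.table([('g', 'string'), ('a', 'int64')], name='t')
    simple = t.g.case().when('a', 1).when('b', 2).else_(0).end()
    searched = ibis.case().when(t.a > 0, 'pos').else_('nonpos').end()
    return simple, searched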
class SearchedCase(ValueOp):
cases = Arg(rlz.list_of(rlz.boolean))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
cases, results, default = self.args
return distinct_roots(
*itertools.chain(
cases.values,
results.values,
[] if default is None else [default],
)
)
def output_type(self):
exprs = self.results + [self.default]
dtype = rlz.highest_precedence_dtype(exprs)
return rlz.shape_like(self.cases, dtype)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
class Where(ValueOp):
"""
Ternary case expression, equivalent to
bool_expr.case()
.when(True, true_expr)
.else_(false_or_null_expr)
"""
bool_expr = Arg(rlz.boolean)
true_expr = Arg(rlz.any)
false_null_expr = Arg(rlz.any)
def output_type(self):
return rlz.shape_like(self.bool_expr, self.true_expr.type())
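# Sketch of the ternary expression above via the public API: ``ifelse`` on a
# boolean value builds a ``Where`` node.
def _example_where():
    import ibis

    t = ibis.table([('a', 'int64')], name='t')
    return (t.a >= 0).ifelse('non-negative', 'negative')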
def _validate_join_tables(left, right):
if not isinstance(left, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'left table'.format(type(left).__name__)
)
if not isinstance(right, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'right table'.format(type(right).__name__)
)
def _make_distinct_join_predicates(left, right, predicates):
# see GH #667
# If left and right table have a common parent expression (e.g. they
# have different filters), must add a self-reference and make the
# appropriate substitution in the join predicates
if left.equals(right):
right = right.view()
predicates = _clean_join_predicates(left, right, predicates)
return left, right, predicates
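# Sketch of the self-join handling above: joining a table to itself requires
# a self-reference (``view``), which this function inserts automatically when
# the two sides are equal.
def _example_self_join():
    import ibis

    t = ibis.table([('key', 'string'), ('a', 'int64')], name='t')
    t2 = t.view()  # SelfReference: relationally distinct from t
    return t.join(t2, t.key == t2.key)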
def _clean_join_predicates(left, right, predicates):
import ibis.expr.analysis as L
result = []
if not isinstance(predicates, (list, tuple)):
predicates = [predicates]
for pred in predicates:
if isinstance(pred, tuple):
if len(pred) != 2:
                raise com.ExpressionError('Join key tuple must be length 2')
lk, rk = pred
lk = left._ensure_expr(lk)
rk = right._ensure_expr(rk)
pred = lk == rk
elif isinstance(pred, str):
pred = left[pred] == right[pred]
elif not isinstance(pred, ir.Expr):
raise NotImplementedError
if not isinstance(pred, ir.BooleanColumn):
raise com.ExpressionError('Join predicate must be comparison')
preds = L.flatten_predicate(pred)
result.extend(preds)
_validate_join_predicates(left, right, result)
return result
def _validate_join_predicates(left, right, predicates):
from ibis.expr.analysis import fully_originate_from
# Validate join predicates. Each predicate must be valid jointly when
# considering the roots of each input table
for predicate in predicates:
if not fully_originate_from(predicate, [left, right]):
raise com.RelationError(
'The expression {!r} does not fully '
'originate from dependencies of the table '
'expression.'.format(predicate)
)
class Join(TableNode):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def __init__(self, left, right, predicates):
_validate_join_tables(left, right)
left, right, predicates = _make_distinct_join_predicates(
left, right, predicates
)
super().__init__(left, right, predicates)
def _get_schema(self):
# For joins retaining both table schemas, merge them together here
left = self.left
right = self.right
if not left._is_materialized():
left = left.materialize()
if not right._is_materialized():
right = right.materialize()
sleft = left.schema()
sright = right.schema()
overlap = set(sleft.names) & set(sright.names)
if overlap:
raise com.RelationError(
'Joined tables have overlapping names: %s' % str(list(overlap))
)
return sleft.append(sright)
def has_schema(self):
return False
def root_tables(self):
if util.all_of([self.left.op(), self.right.op()], (Join, Selection)):
# Unraveling is not possible
return [self.left.op(), self.right.op()]
else:
return distinct_roots(self.left, self.right)
class InnerJoin(Join):
pass
class LeftJoin(Join):
pass
class RightJoin(Join):
pass
class OuterJoin(Join):
pass
class AnyInnerJoin(Join):
pass
class AnyLeftJoin(Join):
pass
class LeftSemiJoin(Join):
def _get_schema(self):
return self.left.schema()
class LeftAntiJoin(Join):
def _get_schema(self):
return self.left.schema()
class MaterializedJoin(TableNode, HasSchema):
join = Arg(ir.TableExpr)
def _validate(self):
assert isinstance(self.join.op(), Join)
# check whether the underlying schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.join.op()._get_schema()
def root_tables(self):
return self.join.op().root_tables()
def blocks(self):
return True
class CrossJoin(InnerJoin):
"""
Some databases have a CROSS JOIN operator, that may be preferential to use
over an INNER JOIN with no predicates.
"""
def __init__(self, *args, **kwargs):
if 'prefixes' in kwargs:
raise NotImplementedError
if len(args) < 2:
raise com.IbisInputError('Must pass at least 2 tables')
left = args[0]
right = args[1]
for t in args[2:]:
right = right.cross_join(t)
InnerJoin.__init__(self, left, right, [])
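# Sketch of the cross-join nicety above: predicates are empty, and any extra
# tables fold in from the right.
def _example_cross_join():
    import ibis

    t1 = ibis.table([('a', 'int64')], name='t1')
    t2 = ibis.table([('b', 'int64')], name='t2')
    return t1.cross_join(t2)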
class AsOfJoin(Join):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
by = Arg(rlz.noop, default=None)
tolerance = Arg(rlz.interval(), default=None)
def __init__(self, left, right, predicates, by, tolerance):
super().__init__(left, right, predicates)
self.by = _clean_join_predicates(self.left, self.right, by)
self.tolerance = tolerance
self._validate_args(['by', 'tolerance'])
def _validate_args(self, args: List[str]):
for arg in args:
argument = self.signature[arg]
value = argument.validate(getattr(self, arg))
setattr(self, arg, value)
class SetOp(TableNode, HasSchema):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
def _validate(self):
if not self.left.schema().equals(self.right.schema()):
raise com.RelationError(
'Table schemas must be equal for set operations'
)
@cached_property
def schema(self):
return self.left.schema()
def blocks(self):
return True
class Union(SetOp):
distinct = Arg(rlz.validator(bool), default=False)
class Intersection(SetOp):
pass
class Difference(SetOp):
pass
class Limit(TableNode):
table = Arg(ir.TableExpr)
n = Arg(rlz.validator(int))
offset = Arg(rlz.validator(int))
def blocks(self):
return True
@property
def schema(self):
return self.table.schema()
def has_schema(self):
return self.table.op().has_schema()
def root_tables(self):
return [self]
# --------------------------------------------------------------------
# Sorting
def to_sort_key(table, key):
if isinstance(key, DeferredSortKey):
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return to_sort_key(table, key)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
class SortKey(Node):
expr = Arg(rlz.column(rlz.any))
ascending = Arg(rlz.validator(bool), default=True)
def __repr__(self):
# Temporary
rows = [
'Sort key:',
' ascending: {0!s}'.format(self.ascending),
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
# TODO: might generalize this equals based on fields
# requires a proxy class with equals for non expr values
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
class SelfReference(TableNode, HasSchema):
table = Arg(ir.TableExpr)
@cached_property
def schema(self):
return self.table.schema()
def root_tables(self):
# The dependencies of this operation are not walked, which makes the
# table expression holding this relationally distinct from other
# expressions, so things like self-joins are possible
return [self]
def blocks(self):
return True
class Selection(TableNode, HasSchema):
table = Arg(ir.TableExpr)
selections = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self, table, selections=None, predicates=None, sort_keys=None
):
import ibis.expr.analysis as L
# Argument cleaning
selections = util.promote_list(
selections if selections is not None else []
)
projections = []
for selection in selections:
if isinstance(selection, str):
projection = table[selection]
else:
projection = selection
projections.append(projection)
sort_keys = [
to_sort_key(table, k)
for k in util.promote_list(
sort_keys if sort_keys is not None else []
)
]
predicates = list(
toolz.concat(
map(
L.flatten_predicate,
predicates if predicates is not None else [],
)
)
)
super().__init__(
table=table,
selections=projections,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator
# Need to validate that the column expressions are compatible with the
# input table; this means they must either be scalar expressions or
# array expressions originating from the same root table expression
dependent_exprs = self.selections + self.sort_keys
self.table._assert_valid(dependent_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate no overlapping columns in schema
assert self.schema
@cached_property
def schema(self):
# Resolve schema and initialize
if not self.selections:
return self.table.schema()
types = []
names = []
for projection in self.selections:
if isinstance(projection, ir.DestructColumn):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = projection.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
elif isinstance(projection, ir.ValueExpr):
names.append(projection.get_name())
types.append(projection.type())
elif isinstance(projection, ir.TableExpr):
schema = projection.schema()
names.extend(schema.names)
types.extend(schema.types)
return Schema(names, types)
def blocks(self):
return bool(self.selections)
def substitute_table(self, table_expr):
return Selection(table_expr, self.selections)
def root_tables(self):
return [self]
def can_add_filters(self, wrapped_expr, predicates):
pass
@staticmethod
def empty_or_equal(lefts, rights):
return not lefts or not rights or all_equal(lefts, rights)
def compatible_with(self, other):
# self and other are equivalent except for predicates, selections, or
# sort keys any of which is allowed to be empty. If both are not empty
# then they must be equal
if self.equals(other):
return True
if not isinstance(other, type(self)):
return False
return self.table.equals(other.table) and (
self.empty_or_equal(self.predicates, other.predicates)
and self.empty_or_equal(self.selections, other.selections)
and self.empty_or_equal(self.sort_keys, other.sort_keys)
)
# Operator combination / fusion logic
def aggregate(self, this, metrics, by=None, having=None):
if len(self.selections) > 0:
return Aggregation(this, metrics, by=by, having=having)
else:
helper = AggregateSelection(this, metrics, by, having)
return helper.get_result()
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
if not self.blocks():
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Selection(
self.table,
self.selections,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class AggregateSelection:
# sort keys cannot be discarded because of order-dependent
# aggregate functions like GROUP_CONCAT
def __init__(self, parent, metrics, by, having):
self.parent = parent
self.op = parent.op()
self.metrics = metrics
self.by = by
self.having = having
def get_result(self):
if self.op.blocks():
return self._plain_subquery()
else:
return self._attempt_pushdown()
def _plain_subquery(self):
return Aggregation(
self.parent, self.metrics, by=self.by, having=self.having
)
def _attempt_pushdown(self):
metrics_valid, lowered_metrics = self._pushdown_exprs(self.metrics)
by_valid, lowered_by = self._pushdown_exprs(self.by)
having_valid, lowered_having = self._pushdown_exprs(
self.having or None
)
if metrics_valid and by_valid and having_valid:
return Aggregation(
self.op.table,
lowered_metrics,
by=lowered_by,
having=lowered_having,
predicates=self.op.predicates,
sort_keys=self.op.sort_keys,
)
else:
return self._plain_subquery()
def _pushdown_exprs(self, exprs):
import ibis.expr.analysis as L
if exprs is None:
return True, []
resolved = self.op.table._resolve(exprs)
subbed_exprs = []
valid = False
if resolved:
for x in util.promote_list(resolved):
subbed = L.sub_for(x, [(self.parent, self.op.table)])
subbed_exprs.append(subbed)
valid = self.op.table._is_valid(subbed_exprs)
else:
valid = False
return valid, subbed_exprs
def _maybe_convert_sort_keys(table, exprs):
try:
return [to_sort_key(table, k) for k in util.promote_list(exprs)]
except com.IbisError:
return None
class Aggregation(TableNode, HasSchema):
"""
metrics : per-group scalar aggregates
by : group expressions
having : post-aggregation predicate
TODO: not putting this in the aggregate operation yet
where : pre-aggregation predicate
"""
table = Arg(ir.TableExpr)
metrics = Arg(rlz.noop)
by = Arg(rlz.noop)
having = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self,
table,
metrics,
by=None,
having=None,
predicates=None,
sort_keys=None,
):
# For tables, like joins, that are not materialized
metrics = self._rewrite_exprs(table, metrics)
by = [] if by is None else by
by = table._resolve(by)
having = [] if having is None else having
predicates = [] if predicates is None else predicates
# order by only makes sense with group by in an aggregation
sort_keys = [] if not by or sort_keys is None else sort_keys
sort_keys = [
to_sort_key(table, k) for k in util.promote_list(sort_keys)
]
by = self._rewrite_exprs(table, by)
having = self._rewrite_exprs(table, having)
predicates = self._rewrite_exprs(table, predicates)
sort_keys = self._rewrite_exprs(table, sort_keys)
super().__init__(
table=table,
metrics=metrics,
by=by,
having=having,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator, is_reduction
# All aggregates are valid
for expr in self.metrics:
if not isinstance(expr, ir.ScalarExpr) or not is_reduction(expr):
raise TypeError(
'Passed a non-aggregate expression: %s' % _safe_repr(expr)
)
for expr in self.having:
if not isinstance(expr, ir.BooleanScalar):
raise com.ExpressionError(
'Having clause must be boolean '
'expression, was: {0!s}'.format(_safe_repr(expr))
)
# All non-scalar refs originate from the input table
all_exprs = self.metrics + self.by + self.having + self.sort_keys
self.table._assert_valid(all_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate schema has no overlapping columns
assert self.schema
def _rewrite_exprs(self, table, what):
what = util.promote_list(what)
all_exprs = []
for expr in what:
if isinstance(expr, ir.ExprList):
all_exprs.extend(expr.exprs())
else:
bound_expr = ir.bind_expr(table, expr)
all_exprs.append(bound_expr)
return all_exprs
# TODO - #2832
# this optimization becomes O(n^2) when it calls into
# _lift_TableColumn in analysis.py, which itself is O(n) and is
# called on each input to the aggregation - thus creating the
# aggregation expression can be extremely slow on wide tables
# that contain a Selection.
# return [
# substitute_parents(x, past_projection=False) for x in all_exprs
# ]
def blocks(self):
return True
def substitute_table(self, table_expr):
return Aggregation(
table_expr, self.metrics, by=self.by, having=self.having
)
@cached_property
def schema(self):
names = []
types = []
for e in self.by + self.metrics:
if isinstance(e, ir.DestructValue):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = e.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
else:
names.append(e.get_name())
types.append(e.type())
return Schema(names, types)
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Aggregation(
self.table,
self.metrics,
by=self.by,
having=self.having,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
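# Sketch of the aggregation components documented above: ``metrics`` are
# per-group reductions, ``by`` the grouping keys, and ``having`` a
# post-aggregation predicate.
def _example_aggregation():
    import ibis

    t = ibis.table([('g', 'string'), ('a', 'int64')], name='t')
    expr = (
        t.group_by('g')
        .having(t.a.max() > 10)
        .aggregate([t.a.sum().name('total')])
    )
    return expr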
class NumericBinaryOp(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
class Add(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.add)
class Multiply(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mul)
class Power(NumericBinaryOp):
def output_type(self):
if util.all_of(self.args, ir.IntegerValue):
return rlz.shape_like(self.args, dt.float64)
else:
return rlz.shape_like(self.args)
class Subtract(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.sub)
class Divide(NumericBinaryOp):
output_type = rlz.shape_like('args', dt.float64)
class FloorDivide(Divide):
output_type = rlz.shape_like('args', dt.int64)
class LogicalBinaryOp(BinaryOp):
left = Arg(rlz.boolean)
right = Arg(rlz.boolean)
output_type = rlz.shape_like('args', dt.boolean)
class Not(UnaryOp):
arg = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.boolean)
class Modulus(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mod)
class And(LogicalBinaryOp):
pass
class Or(LogicalBinaryOp):
pass
class Xor(LogicalBinaryOp):
pass
class Comparison(BinaryOp, BooleanValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
def __init__(self, left, right):
"""
Casting rules for type promotions (for resolving the output type) may
depend in some cases on the target backend.
TODO: how will overflows be handled? Can we provide anything useful in
Ibis to help the user avoid them?
:param left:
:param right:
"""
super().__init__(*self._maybe_cast_args(left, right))
def _maybe_cast_args(self, left, right):
# it might not be necessary?
with suppress(com.IbisTypeError):
return left, rlz.cast(right, left)
with suppress(com.IbisTypeError):
return rlz.cast(left, right), right
return left, right
def output_type(self):
if not rlz.comparable(self.left, self.right):
raise TypeError(
'Arguments with datatype {} and {} are '
'not comparable'.format(self.left.type(), self.right.type())
)
return rlz.shape_like(self.args, dt.boolean)
class Equals(Comparison):
pass
class NotEquals(Comparison):
pass
class GreaterEqual(Comparison):
pass
class Greater(Comparison):
pass
class LessEqual(Comparison):
pass
class Less(Comparison):
pass
class IdenticalTo(Comparison):
pass
class Between(ValueOp, BooleanValueOp):
arg = Arg(rlz.any)
lower_bound = Arg(rlz.any)
upper_bound = Arg(rlz.any)
def output_type(self):
arg, lower, upper = self.args
if not (rlz.comparable(arg, lower) and rlz.comparable(arg, upper)):
raise TypeError('Arguments are not comparable')
return rlz.shape_like(self.args, dt.boolean)
class BetweenTime(Between):
arg = Arg(rlz.one_of([rlz.timestamp, rlz.time]))
lower_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
upper_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
class Contains(ValueOp, BooleanValueOp):
value = Arg(rlz.any)
options = Arg(
rlz.one_of(
[
rlz.list_of(rlz.any),
rlz.set_,
rlz.column(rlz.any),
rlz.array_of(rlz.any),
]
)
)
def __init__(self, value, options):
# it can be a single expression, like a column
if not isinstance(options, ir.Expr):
if util.any_of(options, ir.Expr):
# or a list of expressions
options = ir.sequence(options)
else:
# or a set of scalar values
options = frozenset(options)
super().__init__(value, options)
def output_type(self):
all_args = [self.value]
if isinstance(self.options, ir.ListExpr):
all_args += self.options
else:
all_args += [self.options]
return rlz.shape_like(all_args, dt.boolean)
class NotContains(Contains):
pass
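# Sketch of the membership ops above: ``isin``/``notin`` accept sequences of
# scalars (stored as a frozenset), lists of expressions, or a column to test
# against.
def _example_contains():
    import ibis

    t = ibis.table([('a', 'int64')], name='t')
    in_small_set = t.a.isin([1, 2, 3])  # frozenset of scalar options
    not_in_col = t.a.notin(t.a + 1)     # single column expression option
    return in_small_set, not_in_col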
class ReplaceValues(ValueOp):
"""
Apply a multi-value replacement on a particular column. As an example from
SQL, given DAYOFWEEK(timestamp_col), replace 1 through 5 to "WEEKDAY" and 6
and 7 to "WEEKEND"
"""
pass
class SummaryFilter(ValueOp):
expr = Arg(rlz.noop)
def output_type(self):
return dt.boolean.column_type()
class TopK(ValueOp):
arg = Arg(rlz.noop)
k = Arg(int)
by = Arg(rlz.noop)
def __init__(self, arg, k, by=None):
if by is None:
by = arg.count()
if not isinstance(arg, ir.ColumnExpr):
raise TypeError(arg)
        if not isinstance(k, int) or k < 1:
            raise ValueError('k must be a positive integer, was: {0}'.format(k))
super().__init__(arg, k, by)
def output_type(self):
return ir.TopKExpr
def blocks(self):
return True
class Constant(ValueOp):
pass
class TimestampNow(Constant):
def output_type(self):
return dt.timestamp.scalar_type()
class RandomScalar(Constant):
def output_type(self):
return dt.float64.scalar_type()
class E(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class Pi(Constant):
"""
The constant pi
"""
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class TemporalUnaryOp(UnaryOp):
arg = Arg(rlz.temporal)
class TimestampUnaryOp(UnaryOp):
arg = Arg(rlz.timestamp)
_date_units = {
'Y': 'Y',
'y': 'Y',
'year': 'Y',
'YEAR': 'Y',
'YYYY': 'Y',
'SYYYY': 'Y',
'YYY': 'Y',
'YY': 'Y',
'Q': 'Q',
'q': 'Q',
'quarter': 'Q',
'QUARTER': 'Q',
'M': 'M',
'month': 'M',
'MONTH': 'M',
'w': 'W',
'W': 'W',
'week': 'W',
'WEEK': 'W',
'd': 'D',
'D': 'D',
'J': 'D',
'day': 'D',
'DAY': 'D',
}
_time_units = {
'h': 'h',
'H': 'h',
'HH24': 'h',
'hour': 'h',
'HOUR': 'h',
'm': 'm',
'MI': 'm',
'minute': 'm',
'MINUTE': 'm',
's': 's',
'second': 's',
'SECOND': 's',
'ms': 'ms',
'millisecond': 'ms',
'MILLISECOND': 'ms',
'us': 'us',
    'microsecond': 'us',
    'MICROSECOND': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'NANOSECOND': 'ns',
}
_timestamp_units = toolz.merge(_date_units, _time_units)
class TimestampTruncate(ValueOp):
arg = Arg(rlz.timestamp)
unit = Arg(rlz.isin(_timestamp_units))
output_type = rlz.shape_like('arg', dt.timestamp)
class DateTruncate(ValueOp):
arg = Arg(rlz.date)
unit = Arg(rlz.isin(_date_units))
output_type = rlz.shape_like('arg', dt.date)
class TimeTruncate(ValueOp):
arg = Arg(rlz.time)
unit = Arg(rlz.isin(_time_units))
output_type = rlz.shape_like('arg', dt.time)
class Strftime(ValueOp):
arg = Arg(rlz.temporal)
format_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringToTimestamp(ValueOp):
arg = Arg(rlz.string)
format_str = Arg(rlz.string)
timezone = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.Timestamp(timezone='UTC'))
class ExtractTemporalField(TemporalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
ExtractTimestampField = ExtractTemporalField
class ExtractDateField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
class ExtractTimeField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.time, rlz.timestamp]))
class ExtractYear(ExtractDateField):
pass
class ExtractMonth(ExtractDateField):
pass
class ExtractDay(ExtractDateField):
pass
class ExtractDayOfYear(ExtractDateField):
pass
class ExtractQuarter(ExtractDateField):
pass
class ExtractEpochSeconds(ExtractDateField):
pass
class ExtractWeekOfYear(ExtractDateField):
pass
class ExtractHour(ExtractTimeField):
pass
class ExtractMinute(ExtractTimeField):
pass
class ExtractSecond(ExtractTimeField):
pass
class ExtractMillisecond(ExtractTimeField):
pass
class DayOfWeekIndex(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.int16)
class DayOfWeekName(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.string)
class DayOfWeekNode(Node):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
def output_type(self):
return ir.DayOfWeek
class Time(UnaryOp):
output_type = rlz.shape_like('arg', dt.time)
class Date(UnaryOp):
output_type = rlz.shape_like('arg', dt.date)
class TimestampFromUNIX(ValueOp):
arg = Arg(rlz.any)
# Only pandas-based backends support 'ns'
unit = Arg(rlz.isin({'s', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('arg', dt.timestamp)
class DecimalUnaryOp(UnaryOp):
arg = Arg(rlz.decimal)
class DecimalPrecision(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class DecimalScale(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of({rlz.value(dt.string), rlz.value(dt.binary)}))
how = Arg(rlz.isin({'md5', 'sha1', 'sha256', 'sha512'}))
output_type = rlz.shape_like('arg', dt.binary)
class DateAdd(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateSub(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateDiff(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.date)
output_type = rlz.shape_like('left', dt.Interval('D'))
class TimeAdd(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeSub(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeDiff(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.time)
output_type = rlz.shape_like('left', dt.Interval('s'))
class TimestampAdd(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampSub(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampDiff(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(rlz.timestamp)
output_type = rlz.shape_like('left', dt.Interval('s'))
class IntervalBinaryOp(BinaryOp):
def output_type(self):
args = [
arg.cast(arg.type().value_type)
if isinstance(arg.type(), dt.Interval)
else arg
for arg in self.args
]
expr = rlz.numeric_like(args, self.__class__.op)(self)
left_dtype = self.left.type()
dtype_type = type(left_dtype)
additional_args = {
attr: getattr(left_dtype, attr)
for attr in dtype_type.__slots__
if attr not in {'unit', 'value_type'}
}
dtype = dtype_type(left_dtype.unit, expr.type(), **additional_args)
return rlz.shape_like(self.args, dtype=dtype)
class IntervalAdd(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.add
class IntervalSubtract(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.sub
class IntervalMultiply(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.mul
class IntervalFloorDivide(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.floordiv
class IntervalFromInteger(ValueOp):
arg = Arg(rlz.integer)
unit = Arg(
rlz.isin({'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'})
)
@property
def resolution(self):
return dt.Interval(self.unit).resolution
def output_type(self):
dtype = dt.Interval(self.unit, self.arg.type())
return rlz.shape_like(self.arg, dtype=dtype)
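# Minimal sketch, assuming integer expressions expose a ``to_interval``
# helper that constructs IntervalFromInteger (hypothetical if the API
# differs):
# >>> import ibis
# >>> t = ibis.table([('n', 'int32')], name='t')
# >>> t.n.to_interval('D')  # Interval('D', int32) column; resolution 'day'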
class ArrayColumn(ValueOp):
cols = Arg(rlz.list_of(rlz.column(rlz.any), min_length=1))
def _validate(self):
if len({col.type() for col in self.cols}) > 1:
raise com.IbisTypeError(
f'The types of all input columns must match exactly in a '
f'{type(self).__name__} operation.'
)
def output_type(self):
first_dtype = self.cols[0].type()
return dt.Array(first_dtype).column_type()
class ArrayLength(UnaryOp):
arg = Arg(rlz.array)
output_type = rlz.shape_like('arg', dt.int64)
class ArraySlice(ValueOp):
arg = Arg(rlz.array)
start = Arg(rlz.integer)
stop = Arg(rlz.integer, default=None)
output_type = rlz.typeof('arg')
class ArrayIndex(ValueOp):
arg = Arg(rlz.array)
index = Arg(rlz.integer)
def output_type(self):
value_dtype = self.arg.type().value_type
return rlz.shape_like(self.arg, value_dtype)
class ArrayConcat(ValueOp):
left = Arg(rlz.array)
right = Arg(rlz.array)
output_type = rlz.shape_like('left')
def _validate(self):
left_dtype, right_dtype = self.left.type(), self.right.type()
if left_dtype != right_dtype:
raise com.IbisTypeError(
'Array types must match exactly in a {} operation. '
'Left type {} != Right type {}'.format(
type(self).__name__, left_dtype, right_dtype
)
)
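# Illustrative sketch of the ``_validate`` check above, assuming ``+`` on
# array expressions builds ArrayConcat:
# >>> import ibis
# >>> ibis.literal([1, 2]) + ibis.literal(['a'])
# ...  # raises IbisTypeError: the element types differ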
class ArrayRepeat(ValueOp):
arg = Arg(rlz.array)
times = Arg(rlz.integer)
output_type = rlz.typeof('arg')
class ArrayCollect(Reduction):
arg = Arg(rlz.column(rlz.any))
def output_type(self):
dtype = dt.Array(self.arg.type())
return dtype.scalar_type()
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
class StructField(ValueOp):
arg = Arg(rlz.struct)
field = Arg(str)
def output_type(self):
struct_dtype = self.arg.type()
value_dtype = struct_dtype[self.field]
return rlz.shape_like(self.arg, value_dtype)
class Literal(ValueOp):
value = Arg(rlz.noop)
dtype = Arg(dt.dtype)
def __repr__(self):
return '{}({})'.format(
type(self).__name__, ', '.join(map(repr, self.args))
)
def equals(self, other, cache=None):
# Check types
if not (
isinstance(other, Literal)
and isinstance(other.value, type(self.value))
and self.dtype == other.dtype
):
return False
# Check values
if isinstance(self.value, np.ndarray):
return np.array_equal(self.value, other.value)
else:
return self.value == other.value
def output_type(self):
return self.dtype.scalar_type()
def root_tables(self):
return []
def __hash__(self) -> int:
"""Return the hash of a literal value.
We override this method to make sure that we can handle things that
aren't eminently hashable like an ``array<array<int64>>``.
"""
return hash(self.dtype._literal_value_hash_key(self.value))
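# Minimal sketch: nested literal values are not directly hashable, so hashing
# is delegated to the dtype's ``_literal_value_hash_key``:
# >>> import ibis
# >>> lit = ibis.literal([[1, 2], [3]])  # array<array<...>> literal
# >>> hash(lit.op())  # works despite the unhashable list value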
class NullLiteral(Literal):
"""Typeless NULL literal"""
value = Arg(type(None), default=None)
dtype = Arg(dt.Null, default=dt.null)
class ScalarParameter(ValueOp):
_counter = itertools.count()
dtype = Arg(dt.dtype)
counter = Arg(int, default=lambda: next(ScalarParameter._counter))
def resolve_name(self):
return 'param_{:d}'.format(self.counter)
def __repr__(self):
return '{}(type={})'.format(type(self).__name__, self.dtype)
def __hash__(self):
return hash((self.dtype, self.counter))
def output_type(self):
return self.dtype.scalar_type()
def equals(self, other, cache=None):
return (
isinstance(other, ScalarParameter)
and self.counter == other.counter
and self.dtype.equals(other.dtype, cache=cache)
)
@property
def inputs(self):
return ()
def root_tables(self):
return []
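# Sketch: ``ibis.param`` builds a ScalarParameter; the per-instance counter
# keeps two parameters of the same type distinct:
# >>> import ibis
# >>> p1, p2 = ibis.param('int64'), ibis.param('int64')
# >>> p1.op().equals(p2.op())  # False: different counters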
class ExpressionList(Node):
"""Data structure for a list of arbitrary expressions"""
exprs = Arg(rlz.noop)
def __init__(self, values):
super().__init__(list(map(rlz.any, values)))
@property
def inputs(self):
return (tuple(self.exprs),)
def root_tables(self):
return distinct_roots(self.exprs)
def output_type(self):
return ir.ExprList
class ValueList(ValueOp):
"""Data structure for a list of value expressions"""
values = Arg(rlz.noop)
display_argnames = False # disable showing argnames in repr
def __init__(self, values):
super().__init__(tuple(map(rlz.any, values)))
def output_type(self):
dtype = rlz.highest_precedence_dtype(self.values)
return functools.partial(ir.ListExpr, dtype=dtype)
def root_tables(self):
return distinct_roots(*self.values)
# ----------------------------------------------------------------------
# GeoSpatial operations
class GeoSpatialBinOp(BinaryOp):
"""Geo Spatial base binary"""
left = Arg(rlz.geospatial)
right = Arg(rlz.geospatial)
class GeoSpatialUnOp(UnaryOp):
"""Geo Spatial base unary"""
arg = Arg(rlz.geospatial)
class GeoDistance(GeoSpatialBinOp):
"""Returns minimum distance between two geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoContains(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoContainsProperly(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one,
and no boundary points are shared."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCovers(GeoSpatialBinOp):
"""Returns True if no point in Geometry B is outside Geometry A"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCoveredBy(GeoSpatialBinOp):
"""Returns True if no point in Geometry/Geography A is
outside Geometry/Geography B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCrosses(GeoSpatialBinOp):
"""Returns True if the supplied geometries have some, but not all,
interior points in common."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoDisjoint(GeoSpatialBinOp):
"""Returns True if the Geometries do not “spatially intersect” -
if they do not share any space together."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoEquals(GeoSpatialBinOp):
"""Returns True if the given geometries represent the same geometry."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoGeometryN(GeoSpatialUnOp):
"""Returns the Nth Geometry of a Multi geometry."""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoGeometryType(GeoSpatialUnOp):
"""Returns the type of the geometry."""
output_type = rlz.shape_like('args', dt.string)
class GeoIntersects(GeoSpatialBinOp):
"""Returns True if the Geometries/Geography “spatially intersect in 2D”
- (share any portion of space) and False if they don’t (they are Disjoint).
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIsValid(GeoSpatialUnOp):
"""Returns true if the geometry is well-formed."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoLineLocatePoint(GeoSpatialBinOp):
"""
Locate the distance a point falls along the length of a line.
Returns a float between zero and one representing the location of the
closest point on the linestring to the given point, as a fraction of the
total 2d line length.
"""
left = Arg(rlz.linestring)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.halffloat)
class GeoLineMerge(GeoSpatialUnOp):
"""
Merge a MultiLineString into a LineString.
Returns a (set of) LineString(s) formed by sewing together the
constituent line work of a multilinestring. If a geometry other than
a linestring or multilinestring is given, this will return an empty
geometry collection.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoLineSubstring(GeoSpatialUnOp):
"""
Clip a substring from a LineString.
Returns a linestring that is a substring of the input one, starting
and ending at the given fractions of the total 2d length. The second
and third arguments are floating point values between zero and one.
This only works with linestrings.
"""
arg = Arg(rlz.linestring)
start = Arg(rlz.floating)
end = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.linestring)
class GeoOrderingEquals(GeoSpatialBinOp):
"""
Check if two geometries are equal and have the same point ordering.
Returns true if the two geometries are equal and the coordinates
are in the same order.
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoOverlaps(GeoSpatialBinOp):
"""Returns True if the Geometries share space, are of the same dimension,
but are not completely contained by each other."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoTouches(GeoSpatialBinOp):
"""Returns True if the geometries have at least one point in common,
but their interiors do not intersect."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoUnaryUnion(Reduction):
"""Returns the pointwise union of the geometries in the column."""
arg = Arg(rlz.column(rlz.geospatial))
def output_type(self):
return dt.geometry.scalar_type()
class GeoUnion(GeoSpatialBinOp):
"""Returns the pointwise union of the two geometries."""
output_type = rlz.shape_like('args', dt.geometry)
class GeoArea(GeoSpatialUnOp):
"""Area of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoPerimeter(GeoSpatialUnOp):
"""Perimeter of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoLength(GeoSpatialUnOp):
"""Length of geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoMaxDistance(GeoSpatialBinOp):
"""Returns the 2-dimensional maximum distance between two geometries in
    projected units. If g1 and g2 are the same geometry, the function
    returns the distance between the two vertices farthest from each other
    in that geometry.
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoX(GeoSpatialUnOp):
"""Return the X coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoY(GeoSpatialUnOp):
"""Return the Y coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMax(GeoSpatialUnOp):
"""Returns X maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMax(GeoSpatialUnOp):
"""Returns Y maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoStartPoint(GeoSpatialUnOp):
"""Returns the first point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoEndPoint(GeoSpatialUnOp):
"""Returns the last point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoPoint(GeoSpatialBinOp):
"""
Return a point constructed on the fly from the provided coordinate values.
Constant coordinates result in construction of a POINT literal.
"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.point)
class GeoPointN(GeoSpatialUnOp):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
"""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.point)
class GeoNPoints(GeoSpatialUnOp):
"""Return the number of points in a geometry. Works for all geometries"""
output_type = rlz.shape_like('args', dt.int64)
class GeoNRings(GeoSpatialUnOp):
"""If the geometry is a polygon or multi-polygon returns the number of
rings. It counts the outer rings as well
"""
output_type = rlz.shape_like('args', dt.int64)
class GeoSRID(GeoSpatialUnOp):
"""Returns the spatial reference identifier for the ST_Geometry."""
output_type = rlz.shape_like('args', dt.int64)
class GeoSetSRID(GeoSpatialUnOp):
"""Set the spatial reference identifier for the ST_Geometry."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoBuffer(GeoSpatialUnOp):
"""Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry.
"""
radius = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.geometry)
class GeoCentroid(GeoSpatialUnOp):
"""Returns the geometric center of a geometry."""
output_type = rlz.shape_like('arg', dt.point)
class GeoDFullyWithin(GeoSpatialBinOp):
"""Returns True if the geometries are fully within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoDWithin(GeoSpatialBinOp):
"""Returns True if the geometries are within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoEnvelope(GeoSpatialUnOp):
"""Returns a geometry representing the boundingbox of the supplied geometry.
"""
output_type = rlz.shape_like('arg', dt.polygon)
class GeoAzimuth(GeoSpatialBinOp):
"""Returns the angle in radians from the horizontal of the vector defined
by pointA and pointB. Angle is computed clockwise from down-to-up:
on the clock: 12=0; 3=PI/2; 6=PI; 9=3PI/2.
"""
left = Arg(rlz.point)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.float64)
class GeoWithin(GeoSpatialBinOp):
"""Returns True if the geometry A is completely inside geometry B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIntersection(GeoSpatialBinOp):
"""Returns a geometry that represents the point set intersection
of the Geometries.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoDifference(GeoSpatialBinOp):
"""Returns a geometry that represents that part of geometry A
that does not intersect with geometry B
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoSimplify(GeoSpatialUnOp):
"""Returns a simplified version of the given geometry."""
tolerance = Arg(rlz.floating)
preserve_collapsed = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoTransform(GeoSpatialUnOp):
"""Returns a transformed version of the given geometry into a new SRID."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoAsBinary(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography without SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKB(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKT(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.string)
class GeoAsText(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography without SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.string)
class ElementWiseVectorizedUDF(ValueOp):
"""Node for element wise UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ReductionVectorizedUDF(Reduction):
"""Node for reduction UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.scalar_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class AnalyticVectorizedUDF(AnalyticOp):
"""Node for analytics UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ExistsSubquery(Node):
"""Helper class"""
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class NotExistsSubquery(Node):
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
|
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
        case_expr : Expr
            Expression to equality-compare with the base expression. Must be
            comparable with the base.
        result_expr : Expr
            Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
                'Base expression and passed case are not comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(self.base, cases, results, self.default)
| 1,518
| 1,549
|
import collections
import functools
import itertools
import operator
from contextlib import suppress
from typing import Any, Dict, List
import numpy as np
import toolz
from cached_property import cached_property
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis import util
from ibis.expr.schema import HasSchema, Schema
from ibis.expr.signature import Annotable
from ibis.expr.signature import Argument as Arg
def _safe_repr(x, memo=None):
return x._repr(memo=memo) if isinstance(x, (ir.Expr, Node)) else repr(x)
# TODO: move to analysis
def distinct_roots(*expressions):
roots = toolz.concat(expr.op().root_tables() for expr in expressions)
return list(toolz.unique(roots))
class Node(Annotable):
__slots__ = '_expr_cached', '_hash'
def __repr__(self):
return self._repr()
def _repr(self, memo=None):
if memo is None:
from ibis.expr.format import FormatMemo
memo = FormatMemo()
opname = type(self).__name__
pprint_args = []
def _pp(x):
return _safe_repr(x, memo=memo)
for x in self.args:
if isinstance(x, (tuple, list)):
pp = repr(list(map(_pp, x)))
else:
pp = _pp(x)
pprint_args.append(pp)
return '{}({})'.format(opname, ', '.join(pprint_args))
def __getstate__(self) -> Dict[str, Any]:
"""The attributes _expr_cached and _hash are
used as caches; they can be excluded from
serialization without affecting correctness.
Excluding _expr_cached and _hash from serialization
will allow the serialized bytes to be the same for
        equivalent Node objects.
Returns
-------
Dict[str, Any]
            A dictionary storing the object's attributes.
"""
excluded_slots = {'_expr_cached', '_hash'}
return {
slot: getattr(self, slot)
for slot in self.__slots__
if slot not in excluded_slots
}
def __setstate__(self, state: Dict[str, Any]) -> None:
"""
Parameters
----------
state: Dict[str, Any]
            A dictionary storing the object's attributes.
"""
for slot in state:
setattr(self, slot, state[slot])
@property
def inputs(self):
return tuple(self.args)
def blocks(self):
        # The contents of this node are referentially distinct and may not be
# analyzed deeper
return False
def flat_args(self):
for arg in self.args:
if not isinstance(arg, str) and isinstance(
arg, collections.abc.Iterable
):
for x in arg:
yield x
else:
yield arg
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(
(type(self),)
+ tuple(
element.op() if isinstance(element, ir.Expr) else element
for element in self.flat_args()
)
)
return self._hash
def __eq__(self, other):
return self.equals(other)
def equals(self, other, cache=None):
if cache is None:
cache = {}
key = self, other
try:
return cache[key]
except KeyError:
cache[key] = result = self is other or (
type(self) == type(other)
and all_equal(self.args, other.args, cache=cache)
)
return result
def compatible_with(self, other):
return self.equals(other)
def is_ancestor(self, other):
if isinstance(other, ir.Expr):
other = other.op()
return self.equals(other)
def to_expr(self):
if not hasattr(self, '_expr_cached'):
self._expr_cached = self._make_expr()
return self._expr_cached
def _make_expr(self):
klass = self.output_type()
return klass(self)
def output_type(self):
"""
        This function must resolve the output type of the expression and
        return the expression class used to wrap this node (the wrapping
        itself happens in ``_make_expr``).
"""
raise NotImplementedError
class ValueOp(Node):
def root_tables(self):
exprs = [arg for arg in self.args if isinstance(arg, ir.Expr)]
return distinct_roots(*exprs)
def resolve_name(self):
raise com.ExpressionError(f'Expression is not named: {type(self)}')
def has_resolved_name(self):
return False
def all_equal(left, right, cache=None):
"""Check whether two objects `left` and `right` are equal.
Parameters
----------
left : Union[object, Expr, Node]
right : Union[object, Expr, Node]
cache : Optional[Dict[Tuple[Node, Node], bool]]
A dictionary indicating whether two Nodes are equal
"""
if cache is None:
cache = {}
if util.is_iterable(left):
# check that left and right are equal length iterables and that all
# of their elements are equal
return (
util.is_iterable(right)
and len(left) == len(right)
and all(
itertools.starmap(
functools.partial(all_equal, cache=cache), zip(left, right)
)
)
)
if hasattr(left, 'equals'):
return left.equals(right, cache=cache)
return left == right
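# Quick sketch of all_equal's recursion: iterables must match element-wise,
# and plain values fall back to ``==``:
# >>> all_equal([1, [2, 3]], [1, [2, 3]])
# True
# >>> all_equal([1, 2], [1, 2, 3])  # length mismatch short-circuits
# False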
_table_names = ('unbound_table_{:d}'.format(i) for i in itertools.count())
def genname():
return next(_table_names)
class TableNode(Node):
def get_type(self, name):
return self.schema[name]
def output_type(self):
return ir.TableExpr
def aggregate(self, this, metrics, by=None, having=None):
return Aggregation(this, metrics, by=by, having=having)
def sort_by(self, expr, sort_exprs):
return Selection(expr, [], sort_keys=sort_exprs)
def is_ancestor(self, other):
import ibis.expr.lineage as lin
if isinstance(other, ir.Expr):
other = other.op()
if self.equals(other):
return True
fn = lambda e: (lin.proceed, e.op()) # noqa: E731
expr = self.to_expr()
for child in lin.traverse(fn, expr):
if child.equals(other):
return True
return False
class TableColumn(ValueOp):
"""Selects a column from a TableExpr"""
name = Arg((str, int))
table = Arg(ir.TableExpr)
def __init__(self, name, table):
schema = table.schema()
if isinstance(name, int):
name = schema.name_at_position(name)
super().__init__(name, table)
def _validate(self):
if self.name not in self.table.schema():
raise com.IbisTypeError(
"'{}' is not a field in {}".format(
self.name, self.table.columns
)
)
def parent(self):
return self.table
def resolve_name(self):
return self.name
def has_resolved_name(self):
return True
def root_tables(self):
return self.table.op().root_tables()
def _make_expr(self):
dtype = self.table._get_type(self.name)
klass = dtype.column_type()
return klass(self, name=self.name)
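# Sketch: attribute/item access on a table builds a TableColumn; integer
# names are translated to field names via ``schema.name_at_position``:
# >>> import ibis
# >>> t = ibis.table([('a', 'int64'), ('b', 'string')], name='t')
# >>> t.a.op()    # TableColumn(name='a', table=t)
# >>> t['b']      # same mechanism through __getitem__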
class RowID(ValueOp):
"""The row number (an autonumeric) of the returned result."""
def output_type(self):
return dt.int64.column_type()
def resolve_name(self):
return 'rowid'
def has_resolved_name(self):
return True
def find_all_base_tables(expr, memo=None):
if memo is None:
memo = {}
node = expr.op()
if isinstance(expr, ir.TableExpr) and node.blocks():
        if node not in memo:
memo[node] = expr
return memo
for arg in expr.op().flat_args():
if isinstance(arg, ir.Expr):
find_all_base_tables(arg, memo)
return memo
class PhysicalTable(TableNode, HasSchema):
def blocks(self):
return True
class UnboundTable(PhysicalTable):
schema = Arg(sch.Schema)
name = Arg(str, default=genname)
class DatabaseTable(PhysicalTable):
name = Arg(str)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def change_name(self, new_name):
return type(self)(new_name, self.args[1], self.source)
class SQLQueryResult(TableNode, HasSchema):
"""A table sourced from the result set of a select query"""
query = Arg(rlz.noop)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def blocks(self):
return True
class TableArrayView(ValueOp):
"""
(Temporary?) Helper operation class for SQL translation (fully formed table
subqueries to be viewed as arrays)
"""
table = Arg(ir.TableExpr)
name = Arg(str)
def __init__(self, table):
schema = table.schema()
if len(schema) > 1:
raise com.ExpressionError('Table can only have a single column')
name = schema.names[0]
        super().__init__(table, name)
def _make_expr(self):
ctype = self.table._get_type(self.name)
klass = ctype.column_type()
return klass(self, name=self.name)
class UnaryOp(ValueOp):
arg = Arg(rlz.any)
class BinaryOp(ValueOp):
"""A binary operation"""
left = Arg(rlz.any)
right = Arg(rlz.any)
class Cast(ValueOp):
arg = Arg(rlz.any)
to = Arg(dt.dtype)
# see #396 for the issue preventing this
# def resolve_name(self):
# return self.args[0].get_name()
def output_type(self):
return rlz.shape_like(self.arg, dtype=self.to)
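# Sketch: Cast keeps the argument's shape and swaps in the target dtype:
# >>> import ibis
# >>> t = ibis.table([('a', 'int64')], name='t')
# >>> t.a.cast('string')              # string *column*
# >>> ibis.literal(1).cast('double')  # double *scalar*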
class TypeOf(UnaryOp):
output_type = rlz.shape_like('arg', dt.string)
class Negate(UnaryOp):
arg = Arg(rlz.one_of((rlz.numeric(), rlz.interval())))
output_type = rlz.typeof('arg')
class IsNull(UnaryOp):
"""Returns true if values are null
Returns
-------
isnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class NotNull(UnaryOp):
"""Returns true if values are not null
Returns
-------
notnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class ZeroIfNull(UnaryOp):
output_type = rlz.typeof('arg')
class IfNull(ValueOp):
"""Equivalent to (but perhaps implemented differently):
case().when(expr.notnull(), expr)
.else_(null_substitute_expr)
"""
arg = Arg(rlz.any)
ifnull_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIf(ValueOp):
"""Set values to NULL if they equal the null_if_expr"""
arg = Arg(rlz.any)
null_if_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIfZero(ValueOp):
"""
    Set values to NULL if they are equal to zero. Commonly used in cases where
divide-by-zero would produce an overflow or infinity.
Equivalent to (value == 0).ifelse(ibis.NA, value)
Returns
-------
maybe_nulled : type of caller
"""
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class IsNan(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class IsInf(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class CoalesceLike(ValueOp):
# According to Impala documentation:
# Return type: same as the initial argument value, except that integer
# values are promoted to BIGINT and floating-point values are promoted to
# DOUBLE; use CAST() when inserting into a smaller numeric column
arg = Arg(rlz.list_of(rlz.any))
def output_type(self):
first = self.arg[0]
if isinstance(first, (ir.IntegerValue, ir.FloatingValue)):
dtype = first.type().largest
else:
dtype = first.type()
# self.arg is a list of value expressions
return rlz.shape_like(self.arg, dtype)
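# Sketch of the promotion rule above: integer and floating first arguments
# adopt their ``largest`` type, everything else keeps the first argument's
# type as-is:
# >>> import ibis
# >>> t = ibis.table([('a', 'int32'), ('b', 'int32')], name='t')
# >>> ibis.coalesce(t.a, t.b, 0)  # int64 column, since int32.largest is int64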
class Coalesce(CoalesceLike):
pass
class Greatest(CoalesceLike):
pass
class Least(CoalesceLike):
pass
class Abs(UnaryOp):
"""Absolute value"""
output_type = rlz.typeof('arg')
class Ceil(UnaryOp):
"""
Round up to the nearest integer value greater than or equal to this value
Returns
-------
ceiled : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Floor(UnaryOp):
"""
Round down to the nearest integer value less than or equal to this value
Returns
-------
floored : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Round(ValueOp):
arg = Arg(rlz.numeric)
digits = Arg(rlz.numeric, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
return self.arg._factory
elif self.digits is None:
return rlz.shape_like(self.arg, dt.int64)
else:
return rlz.shape_like(self.arg, dt.double)
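# Sketch: the result dtype depends on whether ``digits`` is given:
# >>> import ibis
# >>> t = ibis.table([('x', 'double')], name='t')
# >>> t.x.round()   # int64 (digits omitted)
# >>> t.x.round(2)  # double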
class Clip(ValueOp):
arg = Arg(rlz.strict_numeric)
lower = Arg(rlz.strict_numeric, default=None)
upper = Arg(rlz.strict_numeric, default=None)
output_type = rlz.typeof('arg')
class BaseConvert(ValueOp):
arg = Arg(rlz.one_of([rlz.integer, rlz.string]))
from_base = Arg(rlz.integer)
to_base = Arg(rlz.integer)
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class MathUnaryOp(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
arg = self.arg
if isinstance(self.arg, ir.DecimalValue):
dtype = arg.type()
else:
dtype = dt.double
return rlz.shape_like(arg, dtype)
class ExpandingTypeMathUnaryOp(MathUnaryOp):
def output_type(self):
if not isinstance(self.arg, ir.DecimalValue):
return super().output_type()
arg = self.arg
return rlz.shape_like(arg, arg.type().largest)
class Exp(ExpandingTypeMathUnaryOp):
pass
class Sign(UnaryOp):
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class Sqrt(MathUnaryOp):
pass
class Logarithm(MathUnaryOp):
arg = Arg(rlz.strict_numeric)
class Log(Logarithm):
arg = Arg(rlz.strict_numeric)
base = Arg(rlz.strict_numeric, default=None)
class Ln(Logarithm):
"""Natural logarithm"""
class Log2(Logarithm):
"""Logarithm base 2"""
class Log10(Logarithm):
"""Logarithm base 10"""
class Degrees(ExpandingTypeMathUnaryOp):
"""Converts radians to degrees"""
arg = Arg(rlz.numeric)
class Radians(MathUnaryOp):
"""Converts degrees to radians"""
arg = Arg(rlz.numeric)
# TRIGONOMETRIC OPERATIONS
class TrigonometricUnary(MathUnaryOp):
"""Trigonometric base unary"""
arg = Arg(rlz.numeric)
class TrigonometricBinary(BinaryOp):
"""Trigonometric base binary"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.float64)
class Acos(TrigonometricUnary):
"""Returns the arc cosine of x"""
class Asin(TrigonometricUnary):
"""Returns the arc sine of x"""
class Atan(TrigonometricUnary):
"""Returns the arc tangent of x"""
class Atan2(TrigonometricBinary):
"""Returns the arc tangent of x and y"""
class Cos(TrigonometricUnary):
"""Returns the cosine of x"""
class Cot(TrigonometricUnary):
"""Returns the cotangent of x"""
class Sin(TrigonometricUnary):
"""Returns the sine of x"""
class Tan(TrigonometricUnary):
"""Returns the tangent of x"""
class StringUnaryOp(UnaryOp):
arg = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class Uppercase(StringUnaryOp):
"""Convert string to all uppercase"""
class Lowercase(StringUnaryOp):
"""Convert string to all lowercase"""
class Reverse(StringUnaryOp):
"""Reverse string"""
class Strip(StringUnaryOp):
"""Remove whitespace from left and right sides of string"""
class LStrip(StringUnaryOp):
"""Remove whitespace from left side of string"""
class RStrip(StringUnaryOp):
"""Remove whitespace from right side of string"""
class Capitalize(StringUnaryOp):
"""Return a capitalized version of input string"""
class Substring(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.integer)
length = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StrRight(ValueOp):
arg = Arg(rlz.string)
nchars = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class Repeat(ValueOp):
arg = Arg(rlz.string)
times = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class StringFind(ValueOp):
arg = Arg(rlz.string)
substr = Arg(rlz.string)
start = Arg(rlz.integer, default=None)
end = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.int64)
class Translate(ValueOp):
arg = Arg(rlz.string)
from_str = Arg(rlz.string)
to_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class LPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class RPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class FindInSet(ValueOp):
needle = Arg(rlz.string)
values = Arg(rlz.list_of(rlz.string, min_length=1))
output_type = rlz.shape_like('needle', dt.int64)
class StringJoin(ValueOp):
sep = Arg(rlz.string)
arg = Arg(rlz.list_of(rlz.string, min_length=1))
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
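# Sketch, assuming the ``join`` method on string expressions wraps StringJoin:
# >>> import ibis
# >>> sep = ibis.literal(',')
# >>> sep.join([ibis.literal('a'), ibis.literal('b')])  # 'a,b' string scalar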
class StartsWith(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class EndsWith(ValueOp):
arg = Arg(rlz.string)
end = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class BooleanValueOp:
pass
class FuzzySearch(ValueOp, BooleanValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.boolean)
class StringSQLLike(FuzzySearch):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
escape = Arg(str, default=None)
class StringSQLILike(StringSQLLike):
"""SQL ilike operation"""
class RegexSearch(FuzzySearch):
pass
class RegexExtract(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
index = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class RegexReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringSplit(ValueOp):
arg = Arg(rlz.string)
delimiter = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.Array(dt.string))
class StringConcat(ValueOp):
arg = Arg(rlz.list_of(rlz.string))
output_type = rlz.shape_like('arg', dt.string)
class ParseURL(ValueOp):
arg = Arg(rlz.string)
extract = Arg(
rlz.isin(
{
'PROTOCOL',
'HOST',
'PATH',
'REF',
'AUTHORITY',
'FILE',
'USERINFO',
'QUERY',
}
)
)
key = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StringLength(UnaryOp):
"""
Compute length of strings
Returns
-------
length : int32
"""
output_type = rlz.shape_like('arg', dt.int32)
class StringAscii(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
# ----------------------------------------------------------------------
class Reduction(ValueOp):
_reduction = True
class Count(Reduction):
arg = Arg((ir.ColumnExpr, ir.TableExpr))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class Arbitrary(Reduction):
arg = Arg(rlz.column(rlz.any))
how = Arg(rlz.isin({'first', 'last', 'heavy'}), default=None)
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitAnd(Reduction):
"""Aggregate bitwise AND operation.
All elements in an integer column are ANDed together. This can be used
to determine which bit flags are set on all elements.
Resources:
* `BigQuery BIT_AND
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_and>`_
* `MySQL BIT_AND
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-and>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitOr(Reduction):
"""Aggregate bitwise OR operation.
All elements in an integer column are ORed together. This can be used
to determine which bit flags are set on any element.
Resources:
* `BigQuery BIT_OR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_or>`_
* `MySQL BIT_OR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-or>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitXor(Reduction):
"""Aggregate bitwise XOR operation.
All elements in an integer column are XORed together. This can be used
as a parity checksum of element values.
Resources:
* `BigQuery BIT_XOR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_xor>`_
* `MySQL BIT_XOR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-xor>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Sum(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.scalar_type()
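# Sketch: summing booleans counts True values, hence the int64 promotion
# above; numeric inputs widen to their largest type:
# >>> import ibis
# >>> t = ibis.table([('flag', 'boolean'), ('n', 'int32')], name='t')
# >>> t.flag.sum()  # int64 scalar
# >>> t.n.sum()     # int64 scalar via int32.largest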
class Mean(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type()
else:
dtype = dt.float64
return dtype.scalar_type()
class Quantile(Reduction):
arg = Arg(rlz.any)
quantile = Arg(rlz.strict_numeric)
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.float64.scalar_type()
class MultiQuantile(Quantile):
arg = Arg(rlz.any)
quantile = Arg(rlz.value(dt.Array(dt.float64)))
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.Array(dt.float64).scalar_type()
class VarianceBase(Reduction):
arg = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.scalar_type()
class StandardDev(VarianceBase):
pass
class Variance(VarianceBase):
pass
class Correlation(Reduction):
"""Coefficient of correlation of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Covariance(Reduction):
"""Covariance of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Max(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Min(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class HLLCardinality(Reduction):
"""Approximate number of unique values using HyperLogLog algorithm.
Impala offers the NDV built-in function for this.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
# Impala 2.0 and higher returns a DOUBLE
# return ir.DoubleScalar
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class GroupConcat(Reduction):
arg = Arg(rlz.column(rlz.any))
sep = Arg(rlz.string, default=',')
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.string.scalar_type()
class CMSMedian(Reduction):
"""
Compute the approximate median of a set of comparable values using the
Count-Min-Sketch algorithm. Exposed in Impala using APPX_MEDIAN.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
# ----------------------------------------------------------------------
# Analytic functions
class AnalyticOp(ValueOp):
pass
class WindowOp(ValueOp):
expr = Arg(rlz.noop)
window = Arg(rlz.noop)
output_type = rlz.array_like('expr')
display_argnames = False
def __init__(self, expr, window):
from ibis.expr.analysis import is_analytic
from ibis.expr.window import propagate_down_window
if not is_analytic(expr):
raise com.IbisInputError(
'Expression does not contain a valid window operation'
)
table = ir.find_base_table(expr)
if table is not None:
window = window.bind(table)
if window.max_lookback is not None:
error_msg = (
"'max lookback' windows must be ordered "
"by a timestamp column"
)
if len(window._order_by) != 1:
raise com.IbisInputError(error_msg)
order_var = window._order_by[0].op().args[0]
if not isinstance(order_var.type(), dt.Timestamp):
raise com.IbisInputError(error_msg)
expr = propagate_down_window(expr, window)
super().__init__(expr, window)
def over(self, window):
new_window = self.window.combine(window)
return WindowOp(self.expr, new_window)
@property
def inputs(self):
return self.expr.op().inputs[0], self.window
def root_tables(self):
return distinct_roots(
self.expr, *self.window._order_by, *self.window._group_by
)
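# Sketch: analytic expressions pick up a window via ``over``, which lands in
# WindowOp; combining windows goes through ``WindowOp.over`` above:
# >>> import ibis
# >>> t = ibis.table([('g', 'string'), ('v', 'double')], name='t')
# >>> w = ibis.window(group_by=t.g, order_by=t.v)
# >>> t.v.sum().over(w)  # WindowOp(expr=sum(v), window=w)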
class ShiftBase(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
offset = Arg(rlz.one_of((rlz.integer, rlz.interval)), default=None)
default = Arg(rlz.any, default=None)
output_type = rlz.typeof('arg')
class Lag(ShiftBase):
pass
class Lead(ShiftBase):
pass
class RankBase(AnalyticOp):
def output_type(self):
return dt.int64.column_type()
class MinRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order.
Examples
--------
values ranks
1 0
1 0
2 2
2 2
2 2
3 5
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL RANK()
arg = Arg(rlz.column(rlz.any))
class DenseRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order, ignoring duplicate values.
Examples
--------
values ranks
1 0
1 0
2 1
2 1
2 1
3 2
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL DENSE_RANK()
arg = Arg(rlz.column(rlz.any))
class RowNumber(RankBase):
"""
Compute row number starting from 0 after sorting by column expression
Examples
--------
>>> import ibis
    >>> t = ibis.table([('values', 'int64')])
>>> w = ibis.window(order_by=t.values)
>>> row_num = ibis.row_number().over(w)
>>> result = t[t.values, row_num.name('row_num')]
Returns
-------
row_number : Int64Column, starting from 0
"""
# Equivalent to SQL ROW_NUMBER()
class CumulativeOp(AnalyticOp):
pass
class CumulativeSum(CumulativeOp):
"""Cumulative sum. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.column_type()
class CumulativeMean(CumulativeOp):
"""Cumulative mean. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.column_type()
class CumulativeMax(CumulativeOp):
"""Cumulative max. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class CumulativeMin(CumulativeOp):
"""Cumulative min. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class PercentRank(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.shape_like('arg', dt.double)
class NTile(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
buckets = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.int64)
class FirstValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class LastValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class NthValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
nth = Arg(rlz.integer)
output_type = rlz.typeof('arg')
# ----------------------------------------------------------------------
# Distinct stuff
class Distinct(TableNode, HasSchema):
"""
Distinct is a table-level unique-ing operation.
In SQL, you might have:
SELECT DISTINCT foo
FROM table
SELECT DISTINCT foo, bar
FROM table
"""
table = Arg(ir.TableExpr)
def _validate(self):
# check whether schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.table.schema()
def blocks(self):
return True
class DistinctColumn(ValueOp):
"""
    COUNT(DISTINCT ...) is really just syntactic sugar, but we provide a
    distinct().count() nicety for users nonetheless.
    For all intents and purposes, like Distinct, but can be distinguished
    later during evaluation if the result should be array-like versus
    table-like, and for calling count().
"""
arg = Arg(rlz.noop)
output_type = rlz.typeof('arg')
def count(self):
"""Only valid if the distinct contains a single column"""
return CountDistinct(self.arg)
class CountDistinct(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.int64.scalar_type()
# ---------------------------------------------------------------------
# Boolean reductions and semi/anti join support
class Any(ValueOp):
# Depending on the kind of input boolean array, the result might either be
# array-like (an existence-type predicate) or scalar (a reduction)
arg = Arg(rlz.column(rlz.boolean))
@property
def _reduction(self):
roots = self.arg.op().root_tables()
return len(roots) < 2
def output_type(self):
if self._reduction:
return dt.boolean.scalar_type()
else:
return dt.boolean.column_type()
def negate(self):
return NotAny(self.arg)
class All(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.scalar_like('arg')
_reduction = True
def negate(self):
return NotAll(self.arg)
class NotAny(Any):
def negate(self):
return Any(self.arg)
class NotAll(All):
def negate(self):
return All(self.arg)
class CumulativeAny(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class CumulativeAll(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
# ---------------------------------------------------------------------
class TypedCaseBuilder:
__slots__ = ()
def type(self):
types = [result.type() for result in self.results]
return dt.highest_precedence(types)
def else_(self, result_expr):
"""
        Specify the default result returned when no case matches.
Returns
-------
builder : CaseBuilder
"""
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = ir.as_value_expr(result_expr)
kwargs['default'] = result_expr
# Maintain immutability
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
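# Sketch of the builder flow: ``when`` accumulates case/result pairs
# immutably, and ``end`` materializes the case op, substituting a typed NULL
# when ``else_`` was never called:
# >>> import ibis
# >>> t = ibis.table([('g', 'string')], name='t')
# >>> t.g.case().when('a', 1).when('b', 2).else_(0).end()  # SimpleCase
# >>> ibis.case().when(t.g == 'a', 1).end()  # SearchedCase, NULL default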
class SimpleCase(ValueOp):
base = Arg(rlz.any)
cases = Arg(rlz.list_of(rlz.any))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
return distinct_roots(
*itertools.chain(
[self.base],
self.cases,
self.results,
[] if self.default is None else [self.default],
)
)
def output_type(self):
exprs = self.results + [self.default]
return rlz.shape_like(self.base, dtype=exprs.type())
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
        case_expr : Expr
            Expression to equality-compare with the base expression. Must be
            comparable with the base.
        result_expr : Expr
            Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
                'Base expression and passed case are not comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(self.base, cases, results, self.default)
class SearchedCase(ValueOp):
cases = Arg(rlz.list_of(rlz.boolean))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
cases, results, default = self.args
return distinct_roots(
*itertools.chain(
cases.values,
results.values,
[] if default is None else [default],
)
)
def output_type(self):
exprs = self.results + [self.default]
dtype = rlz.highest_precedence_dtype(exprs)
return rlz.shape_like(self.cases, dtype)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
        case_expr : BooleanValue Expr
            Boolean predicate for this case; unlike SimpleCaseBuilder.when,
            there is no base expression to compare against.
        result_expr : Expr
            Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
class Where(ValueOp):
"""
Ternary case expression, equivalent to
bool_expr.case()
.when(True, true_expr)
.else_(false_or_null_expr)
"""
bool_expr = Arg(rlz.boolean)
true_expr = Arg(rlz.any)
false_null_expr = Arg(rlz.any)
def output_type(self):
return rlz.shape_like(self.bool_expr, self.true_expr.type())
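# Sketch, assuming the ``ifelse`` method on boolean expressions wraps Where:
# the result takes the boolean's shape and the true branch's type:
# >>> import ibis
# >>> t = ibis.table([('x', 'int64')], name='t')
# >>> (t.x > 0).ifelse(t.x, 0)  # int64 column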
def _validate_join_tables(left, right):
if not isinstance(left, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'left table'.format(type(left).__name__)
)
if not isinstance(right, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'right table'.format(type(right).__name__)
)
def _make_distinct_join_predicates(left, right, predicates):
# see GH #667
# If left and right table have a common parent expression (e.g. they
# have different filters), must add a self-reference and make the
# appropriate substitution in the join predicates
if left.equals(right):
right = right.view()
predicates = _clean_join_predicates(left, right, predicates)
return left, right, predicates
def _clean_join_predicates(left, right, predicates):
import ibis.expr.analysis as L
result = []
if not isinstance(predicates, (list, tuple)):
predicates = [predicates]
for pred in predicates:
if isinstance(pred, tuple):
if len(pred) != 2:
                raise com.ExpressionError('Join key tuple must be length 2')
lk, rk = pred
lk = left._ensure_expr(lk)
rk = right._ensure_expr(rk)
pred = lk == rk
elif isinstance(pred, str):
pred = left[pred] == right[pred]
elif not isinstance(pred, ir.Expr):
raise NotImplementedError
if not isinstance(pred, ir.BooleanColumn):
raise com.ExpressionError('Join predicate must be comparison')
preds = L.flatten_predicate(pred)
result.extend(preds)
_validate_join_predicates(left, right, result)
return result
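# Sketch of the predicate forms accepted above:
# >>> import ibis
# >>> a = ibis.table([('k', 'int64'), ('v', 'double')], name='a')
# >>> b = ibis.table([('k', 'int64'), ('w', 'double')], name='b')
# >>> a.inner_join(b, 'k')           # shared column name
# >>> a.inner_join(b, [('k', 'k')])  # explicit (left, right) key pair
# >>> a.inner_join(b, [a.k == b.k])  # arbitrary boolean predicate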
def _validate_join_predicates(left, right, predicates):
from ibis.expr.analysis import fully_originate_from
# Validate join predicates. Each predicate must be valid jointly when
# considering the roots of each input table
for predicate in predicates:
if not fully_originate_from(predicate, [left, right]):
raise com.RelationError(
'The expression {!r} does not fully '
'originate from dependencies of the table '
'expression.'.format(predicate)
)
class Join(TableNode):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def __init__(self, left, right, predicates):
_validate_join_tables(left, right)
left, right, predicates = _make_distinct_join_predicates(
left, right, predicates
)
super().__init__(left, right, predicates)
def _get_schema(self):
# For joins retaining both table schemas, merge them together here
left = self.left
right = self.right
if not left._is_materialized():
left = left.materialize()
if not right._is_materialized():
right = right.materialize()
sleft = left.schema()
sright = right.schema()
overlap = set(sleft.names) & set(sright.names)
if overlap:
raise com.RelationError(
'Joined tables have overlapping names: %s' % str(list(overlap))
)
return sleft.append(sright)
def has_schema(self):
return False
def root_tables(self):
if util.all_of([self.left.op(), self.right.op()], (Join, Selection)):
# Unraveling is not possible
return [self.left.op(), self.right.op()]
else:
return distinct_roots(self.left, self.right)
class InnerJoin(Join):
pass
class LeftJoin(Join):
pass
class RightJoin(Join):
pass
class OuterJoin(Join):
pass
class AnyInnerJoin(Join):
pass
class AnyLeftJoin(Join):
pass
class LeftSemiJoin(Join):
def _get_schema(self):
return self.left.schema()
class LeftAntiJoin(Join):
def _get_schema(self):
return self.left.schema()
class MaterializedJoin(TableNode, HasSchema):
join = Arg(ir.TableExpr)
def _validate(self):
assert isinstance(self.join.op(), Join)
# check whether the underlying schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.join.op()._get_schema()
def root_tables(self):
return self.join.op().root_tables()
def blocks(self):
return True
class CrossJoin(InnerJoin):
"""
    Some databases have a CROSS JOIN operator, which may be preferable to an
    INNER JOIN with no predicates.
"""
def __init__(self, *args, **kwargs):
if 'prefixes' in kwargs:
raise NotImplementedError
if len(args) < 2:
raise com.IbisInputError('Must pass at least 2 tables')
left = args[0]
right = args[1]
for t in args[2:]:
right = right.cross_join(t)
InnerJoin.__init__(self, left, right, [])
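# Illustrative note (hypothetical tables ``a``, ``b``, ``c``): extra tables
# are folded into the right side, so CrossJoin(a, b, c) is equivalent to
# a.cross_join(b.cross_join(c)) with an empty predicate list.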
class AsOfJoin(Join):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
by = Arg(rlz.noop, default=None)
tolerance = Arg(rlz.interval(), default=None)
def __init__(self, left, right, predicates, by, tolerance):
super().__init__(left, right, predicates)
self.by = _clean_join_predicates(self.left, self.right, by)
self.tolerance = tolerance
self._validate_args(['by', 'tolerance'])
def _validate_args(self, args: List[str]):
for arg in args:
argument = self.signature[arg]
value = argument.validate(getattr(self, arg))
setattr(self, arg, value)
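# Illustrative sketch (hypothetical ``trades`` and ``quotes`` tables that
# share 'time' and 'ticker' columns): the user-facing asof_join API builds
# this node; ``by`` adds an exact-match key, and ``tolerance`` bounds how
# far apart matched rows may be:
#
#   trades.asof_join(quotes, trades.time == quotes.time, by='ticker')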
class SetOp(TableNode, HasSchema):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
def _validate(self):
if not self.left.schema().equals(self.right.schema()):
raise com.RelationError(
'Table schemas must be equal for set operations'
)
@cached_property
def schema(self):
return self.left.schema()
def blocks(self):
return True
class Union(SetOp):
distinct = Arg(rlz.validator(bool), default=False)
class Intersection(SetOp):
pass
class Difference(SetOp):
pass
class Limit(TableNode):
table = Arg(ir.TableExpr)
n = Arg(rlz.validator(int))
offset = Arg(rlz.validator(int))
def blocks(self):
return True
@property
def schema(self):
return self.table.schema()
def has_schema(self):
return self.table.op().has_schema()
def root_tables(self):
return [self]
# --------------------------------------------------------------------
# Sorting
def to_sort_key(table, key):
if isinstance(key, DeferredSortKey):
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return to_sort_key(table, key)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
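# Illustrative sketch (hypothetical table ``t`` with a column 'a'): every
# accepted sort key form resolves to a SortExpr here:
#
#   to_sort_key(t, 'a')            # column name, ascending by default
#   to_sort_key(t, ('a', 'desc'))  # (key, order); 'desc' means descending
#   to_sort_key(t, (t.a, False))   # expression with an explicit order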
class SortKey(Node):
expr = Arg(rlz.column(rlz.any))
ascending = Arg(rlz.validator(bool), default=True)
def __repr__(self):
# Temporary
rows = [
'Sort key:',
' ascending: {0!s}'.format(self.ascending),
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
# TODO: might generalize this equals based on fields
# requires a proxy class with equals for non expr values
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
class SelfReference(TableNode, HasSchema):
table = Arg(ir.TableExpr)
@cached_property
def schema(self):
return self.table.schema()
def root_tables(self):
# The dependencies of this operation are not walked, which makes the
# table expression holding this relationally distinct from other
# expressions, so things like self-joins are possible
return [self]
def blocks(self):
return True
class Selection(TableNode, HasSchema):
table = Arg(ir.TableExpr)
selections = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self, table, selections=None, predicates=None, sort_keys=None
):
import ibis.expr.analysis as L
# Argument cleaning
selections = util.promote_list(
selections if selections is not None else []
)
projections = []
for selection in selections:
if isinstance(selection, str):
projection = table[selection]
else:
projection = selection
projections.append(projection)
sort_keys = [
to_sort_key(table, k)
for k in util.promote_list(
sort_keys if sort_keys is not None else []
)
]
predicates = list(
toolz.concat(
map(
L.flatten_predicate,
predicates if predicates is not None else [],
)
)
)
super().__init__(
table=table,
selections=projections,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator
# Need to validate that the column expressions are compatible with the
# input table; this means they must either be scalar expressions or
# array expressions originating from the same root table expression
dependent_exprs = self.selections + self.sort_keys
self.table._assert_valid(dependent_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate no overlapping columns in schema
assert self.schema
@cached_property
def schema(self):
# Resolve schema and initialize
if not self.selections:
return self.table.schema()
types = []
names = []
for projection in self.selections:
if isinstance(projection, ir.DestructColumn):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = projection.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
elif isinstance(projection, ir.ValueExpr):
names.append(projection.get_name())
types.append(projection.type())
elif isinstance(projection, ir.TableExpr):
schema = projection.schema()
names.extend(schema.names)
types.extend(schema.types)
return Schema(names, types)
def blocks(self):
return bool(self.selections)
def substitute_table(self, table_expr):
return Selection(table_expr, self.selections)
def root_tables(self):
return [self]
def can_add_filters(self, wrapped_expr, predicates):
pass
@staticmethod
def empty_or_equal(lefts, rights):
return not lefts or not rights or all_equal(lefts, rights)
def compatible_with(self, other):
        # self and other are equivalent except for predicates, selections,
        # or sort keys, any of which may be empty. If both sides are
        # non-empty, they must be equal
if self.equals(other):
return True
if not isinstance(other, type(self)):
return False
return self.table.equals(other.table) and (
self.empty_or_equal(self.predicates, other.predicates)
and self.empty_or_equal(self.selections, other.selections)
and self.empty_or_equal(self.sort_keys, other.sort_keys)
)
# Operator combination / fusion logic
def aggregate(self, this, metrics, by=None, having=None):
if len(self.selections) > 0:
return Aggregation(this, metrics, by=by, having=having)
else:
helper = AggregateSelection(this, metrics, by, having)
return helper.get_result()
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
if not self.blocks():
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Selection(
self.table,
self.selections,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
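# Illustrative note (hypothetical table ``t``): because a bare filter is a
# non-blocking Selection, an expression such as
#
#   t.filter(t.a > 0).sort_by('a')
#
# can be fused into a single Selection carrying both the predicate and the
# sort key, rather than nesting one Selection inside another.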
class AggregateSelection:
# sort keys cannot be discarded because of order-dependent
# aggregate functions like GROUP_CONCAT
def __init__(self, parent, metrics, by, having):
self.parent = parent
self.op = parent.op()
self.metrics = metrics
self.by = by
self.having = having
def get_result(self):
if self.op.blocks():
return self._plain_subquery()
else:
return self._attempt_pushdown()
def _plain_subquery(self):
return Aggregation(
self.parent, self.metrics, by=self.by, having=self.having
)
def _attempt_pushdown(self):
metrics_valid, lowered_metrics = self._pushdown_exprs(self.metrics)
by_valid, lowered_by = self._pushdown_exprs(self.by)
having_valid, lowered_having = self._pushdown_exprs(
self.having or None
)
if metrics_valid and by_valid and having_valid:
return Aggregation(
self.op.table,
lowered_metrics,
by=lowered_by,
having=lowered_having,
predicates=self.op.predicates,
sort_keys=self.op.sort_keys,
)
else:
return self._plain_subquery()
def _pushdown_exprs(self, exprs):
import ibis.expr.analysis as L
if exprs is None:
return True, []
resolved = self.op.table._resolve(exprs)
subbed_exprs = []
valid = False
if resolved:
for x in util.promote_list(resolved):
subbed = L.sub_for(x, [(self.parent, self.op.table)])
subbed_exprs.append(subbed)
valid = self.op.table._is_valid(subbed_exprs)
else:
valid = False
return valid, subbed_exprs
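# Illustrative note (hypothetical table ``t``): when the parent operation is
# non-blocking (e.g. a sort), the aggregate inputs can be rewritten against
# the underlying table, so
#
#   t.sort_by('a').aggregate([t.b.sum().name('total')])
#
# may lower to a single Aggregation over ``t`` that keeps the sort keys,
# which matters for order-dependent aggregates such as GROUP_CONCAT.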
def _maybe_convert_sort_keys(table, exprs):
try:
return [to_sort_key(table, k) for k in util.promote_list(exprs)]
except com.IbisError:
return None
class Aggregation(TableNode, HasSchema):
"""
metrics : per-group scalar aggregates
by : group expressions
having : post-aggregation predicate
TODO: not putting this in the aggregate operation yet
where : pre-aggregation predicate
"""
table = Arg(ir.TableExpr)
metrics = Arg(rlz.noop)
by = Arg(rlz.noop)
having = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self,
table,
metrics,
by=None,
having=None,
predicates=None,
sort_keys=None,
):
# For tables, like joins, that are not materialized
metrics = self._rewrite_exprs(table, metrics)
by = [] if by is None else by
by = table._resolve(by)
having = [] if having is None else having
predicates = [] if predicates is None else predicates
# order by only makes sense with group by in an aggregation
sort_keys = [] if not by or sort_keys is None else sort_keys
sort_keys = [
to_sort_key(table, k) for k in util.promote_list(sort_keys)
]
by = self._rewrite_exprs(table, by)
having = self._rewrite_exprs(table, having)
predicates = self._rewrite_exprs(table, predicates)
sort_keys = self._rewrite_exprs(table, sort_keys)
super().__init__(
table=table,
metrics=metrics,
by=by,
having=having,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator, is_reduction
# All aggregates are valid
for expr in self.metrics:
if not isinstance(expr, ir.ScalarExpr) or not is_reduction(expr):
raise TypeError(
'Passed a non-aggregate expression: %s' % _safe_repr(expr)
)
for expr in self.having:
if not isinstance(expr, ir.BooleanScalar):
raise com.ExpressionError(
'Having clause must be boolean '
'expression, was: {0!s}'.format(_safe_repr(expr))
)
# All non-scalar refs originate from the input table
all_exprs = self.metrics + self.by + self.having + self.sort_keys
self.table._assert_valid(all_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate schema has no overlapping columns
assert self.schema
def _rewrite_exprs(self, table, what):
what = util.promote_list(what)
all_exprs = []
for expr in what:
if isinstance(expr, ir.ExprList):
all_exprs.extend(expr.exprs())
else:
bound_expr = ir.bind_expr(table, expr)
all_exprs.append(bound_expr)
return all_exprs
# TODO - #2832
# this optimization becomes O(n^2) when it calls into
# _lift_TableColumn in analysis.py, which itself is O(n) and is
# called on each input to the aggregation - thus creating the
# aggregation expression can be extremely slow on wide tables
# that contain a Selection.
# return [
# substitute_parents(x, past_projection=False) for x in all_exprs
# ]
def blocks(self):
return True
def substitute_table(self, table_expr):
return Aggregation(
table_expr, self.metrics, by=self.by, having=self.having
)
@cached_property
def schema(self):
names = []
types = []
for e in self.by + self.metrics:
if isinstance(e, ir.DestructValue):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = e.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
else:
names.append(e.get_name())
types.append(e.type())
return Schema(names, types)
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Aggregation(
self.table,
self.metrics,
by=self.by,
having=self.having,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
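# Illustrative note (hypothetical table ``t``): as with Selection.sort_by,
# sort keys that are valid against the underlying table are fused into the
# Aggregation itself, so
#
#   t.group_by('g').aggregate(total=t.x.sum()).sort_by('g')
#
# can produce one Aggregation node rather than a wrapping Selection.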
class NumericBinaryOp(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
class Add(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.add)
class Multiply(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mul)
class Power(NumericBinaryOp):
def output_type(self):
if util.all_of(self.args, ir.IntegerValue):
return rlz.shape_like(self.args, dt.float64)
else:
return rlz.shape_like(self.args)
class Subtract(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.sub)
class Divide(NumericBinaryOp):
output_type = rlz.shape_like('args', dt.float64)
class FloorDivide(Divide):
output_type = rlz.shape_like('args', dt.int64)
class LogicalBinaryOp(BinaryOp):
left = Arg(rlz.boolean)
right = Arg(rlz.boolean)
output_type = rlz.shape_like('args', dt.boolean)
class Not(UnaryOp):
arg = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.boolean)
class Modulus(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mod)
class And(LogicalBinaryOp):
pass
class Or(LogicalBinaryOp):
pass
class Xor(LogicalBinaryOp):
pass
class Comparison(BinaryOp, BooleanValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
def __init__(self, left, right):
"""
Casting rules for type promotions (for resolving the output type) may
depend in some cases on the target backend.
TODO: how will overflows be handled? Can we provide anything useful in
Ibis to help the user avoid them?
:param left:
:param right:
"""
super().__init__(*self._maybe_cast_args(left, right))
def _maybe_cast_args(self, left, right):
# it might not be necessary?
with suppress(com.IbisTypeError):
return left, rlz.cast(right, left)
with suppress(com.IbisTypeError):
return rlz.cast(left, right), right
return left, right
def output_type(self):
if not rlz.comparable(self.left, self.right):
raise TypeError(
'Arguments with datatype {} and {} are '
'not comparable'.format(self.left.type(), self.right.type())
)
return rlz.shape_like(self.args, dt.boolean)
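# Illustrative sketch (hypothetical table ``t`` with an int64 column 'a'):
# the implicit-cast attempts above let
#
#   t.a == 4
#
# resolve by casting the (inferred int8) literal to int64; if neither
# operand can be cast to the other, output_type raises a TypeError for
# incomparable types.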
class Equals(Comparison):
pass
class NotEquals(Comparison):
pass
class GreaterEqual(Comparison):
pass
class Greater(Comparison):
pass
class LessEqual(Comparison):
pass
class Less(Comparison):
pass
class IdenticalTo(Comparison):
pass
class Between(ValueOp, BooleanValueOp):
arg = Arg(rlz.any)
lower_bound = Arg(rlz.any)
upper_bound = Arg(rlz.any)
def output_type(self):
arg, lower, upper = self.args
if not (rlz.comparable(arg, lower) and rlz.comparable(arg, upper)):
raise TypeError('Arguments are not comparable')
return rlz.shape_like(self.args, dt.boolean)
class BetweenTime(Between):
arg = Arg(rlz.one_of([rlz.timestamp, rlz.time]))
lower_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
upper_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
class Contains(ValueOp, BooleanValueOp):
value = Arg(rlz.any)
options = Arg(
rlz.one_of(
[
rlz.list_of(rlz.any),
rlz.set_,
rlz.column(rlz.any),
rlz.array_of(rlz.any),
]
)
)
def __init__(self, value, options):
# it can be a single expression, like a column
if not isinstance(options, ir.Expr):
if util.any_of(options, ir.Expr):
# or a list of expressions
options = ir.sequence(options)
else:
# or a set of scalar values
options = frozenset(options)
super().__init__(value, options)
def output_type(self):
all_args = [self.value]
if isinstance(self.options, ir.ListExpr):
all_args += self.options
else:
all_args += [self.options]
return rlz.shape_like(all_args, dt.boolean)
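# Illustrative sketch (hypothetical tables ``t`` and ``t2``): the options
# normalization above distinguishes three cases for the user-facing isin:
#
#   t.a.isin([1, 2, 3])   # plain scalars -> frozenset
#   t.a.isin([t.b, 3])    # any expression present -> ir.sequence(...)
#   t.a.isin(t2.a)        # a single column expression is kept as-is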
class NotContains(Contains):
pass
class ReplaceValues(ValueOp):
"""
    Apply a multi-value replacement on a particular column. As an example
    from SQL, given DAYOFWEEK(timestamp_col), replace values 1 through 5
    with "WEEKDAY" and values 6 and 7 with "WEEKEND"
"""
pass
class SummaryFilter(ValueOp):
expr = Arg(rlz.noop)
def output_type(self):
return dt.boolean.column_type()
class TopK(ValueOp):
arg = Arg(rlz.noop)
k = Arg(int)
by = Arg(rlz.noop)
def __init__(self, arg, k, by=None):
if by is None:
by = arg.count()
if not isinstance(arg, ir.ColumnExpr):
raise TypeError(arg)
if not isinstance(k, int) or k < 0:
            raise ValueError(
                'k must be a non-negative integer, was: {0}'.format(k)
            )
super().__init__(arg, k, by)
def output_type(self):
return ir.TopKExpr
def blocks(self):
return True
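# Illustrative sketch (hypothetical table ``t``): the user-facing ``topk``
# API constructs this node; when ``by`` is omitted the count of each group
# is used:
#
#   t.city.topk(5)                     # five most frequent cities
#   t.city.topk(5, by=t.amount.sum())  # five cities by total amount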
class Constant(ValueOp):
pass
class TimestampNow(Constant):
def output_type(self):
return dt.timestamp.scalar_type()
class RandomScalar(Constant):
def output_type(self):
return dt.float64.scalar_type()
class E(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class Pi(Constant):
"""
The constant pi
"""
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class TemporalUnaryOp(UnaryOp):
arg = Arg(rlz.temporal)
class TimestampUnaryOp(UnaryOp):
arg = Arg(rlz.timestamp)
_date_units = {
'Y': 'Y',
'y': 'Y',
'year': 'Y',
'YEAR': 'Y',
'YYYY': 'Y',
'SYYYY': 'Y',
'YYY': 'Y',
'YY': 'Y',
'Q': 'Q',
'q': 'Q',
'quarter': 'Q',
'QUARTER': 'Q',
'M': 'M',
'month': 'M',
'MONTH': 'M',
'w': 'W',
'W': 'W',
'week': 'W',
'WEEK': 'W',
'd': 'D',
'D': 'D',
'J': 'D',
'day': 'D',
'DAY': 'D',
}
_time_units = {
'h': 'h',
'H': 'h',
'HH24': 'h',
'hour': 'h',
'HOUR': 'h',
'm': 'm',
'MI': 'm',
'minute': 'm',
'MINUTE': 'm',
's': 's',
'second': 's',
'SECOND': 's',
'ms': 'ms',
'millisecond': 'ms',
'MILLISECOND': 'ms',
'us': 'us',
    'microsecond': 'us',
    'MICROSECOND': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'NANOSECOND': 'ns',
}
_timestamp_units = toolz.merge(_date_units, _time_units)
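# Illustrative note: the mappings above normalize the many accepted unit
# spellings to canonical codes, e.g.
#
#   _date_units['quarter'] == 'Q'
#   _time_units['HH24'] == 'h'
#   _timestamp_units['millisecond'] == 'ms'
#
# which lets the truncate operations below validate ``unit`` with rlz.isin.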
class TimestampTruncate(ValueOp):
arg = Arg(rlz.timestamp)
unit = Arg(rlz.isin(_timestamp_units))
output_type = rlz.shape_like('arg', dt.timestamp)
class DateTruncate(ValueOp):
arg = Arg(rlz.date)
unit = Arg(rlz.isin(_date_units))
output_type = rlz.shape_like('arg', dt.date)
class TimeTruncate(ValueOp):
arg = Arg(rlz.time)
unit = Arg(rlz.isin(_time_units))
output_type = rlz.shape_like('arg', dt.time)
class Strftime(ValueOp):
arg = Arg(rlz.temporal)
format_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringToTimestamp(ValueOp):
arg = Arg(rlz.string)
format_str = Arg(rlz.string)
timezone = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.Timestamp(timezone='UTC'))
class ExtractTemporalField(TemporalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
ExtractTimestampField = ExtractTemporalField
class ExtractDateField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
class ExtractTimeField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.time, rlz.timestamp]))
class ExtractYear(ExtractDateField):
pass
class ExtractMonth(ExtractDateField):
pass
class ExtractDay(ExtractDateField):
pass
class ExtractDayOfYear(ExtractDateField):
pass
class ExtractQuarter(ExtractDateField):
pass
class ExtractEpochSeconds(ExtractDateField):
pass
class ExtractWeekOfYear(ExtractDateField):
pass
class ExtractHour(ExtractTimeField):
pass
class ExtractMinute(ExtractTimeField):
pass
class ExtractSecond(ExtractTimeField):
pass
class ExtractMillisecond(ExtractTimeField):
pass
class DayOfWeekIndex(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.int16)
class DayOfWeekName(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.string)
class DayOfWeekNode(Node):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
def output_type(self):
return ir.DayOfWeek
class Time(UnaryOp):
output_type = rlz.shape_like('arg', dt.time)
class Date(UnaryOp):
output_type = rlz.shape_like('arg', dt.date)
class TimestampFromUNIX(ValueOp):
arg = Arg(rlz.any)
# Only pandas-based backends support 'ns'
unit = Arg(rlz.isin({'s', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('arg', dt.timestamp)
class DecimalUnaryOp(UnaryOp):
arg = Arg(rlz.decimal)
class DecimalPrecision(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class DecimalScale(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of({rlz.value(dt.string), rlz.value(dt.binary)}))
how = Arg(rlz.isin({'md5', 'sha1', 'sha256', 'sha512'}))
output_type = rlz.shape_like('arg', dt.binary)
class DateAdd(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateSub(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateDiff(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.date)
output_type = rlz.shape_like('left', dt.Interval('D'))
class TimeAdd(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeSub(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeDiff(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.time)
output_type = rlz.shape_like('left', dt.Interval('s'))
class TimestampAdd(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampSub(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampDiff(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(rlz.timestamp)
output_type = rlz.shape_like('left', dt.Interval('s'))
class IntervalBinaryOp(BinaryOp):
def output_type(self):
args = [
arg.cast(arg.type().value_type)
if isinstance(arg.type(), dt.Interval)
else arg
for arg in self.args
]
expr = rlz.numeric_like(args, self.__class__.op)(self)
left_dtype = self.left.type()
dtype_type = type(left_dtype)
additional_args = {
attr: getattr(left_dtype, attr)
for attr in dtype_type.__slots__
if attr not in {'unit', 'value_type'}
}
dtype = dtype_type(left_dtype.unit, expr.type(), **additional_args)
return rlz.shape_like(self.args, dtype=dtype)
class IntervalAdd(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.add
class IntervalSubtract(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.sub
class IntervalMultiply(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.mul
class IntervalFloorDivide(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.floordiv
class IntervalFromInteger(ValueOp):
arg = Arg(rlz.integer)
unit = Arg(
rlz.isin({'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'})
)
@property
def resolution(self):
return dt.Interval(self.unit).resolution
def output_type(self):
dtype = dt.Interval(self.unit, self.arg.type())
return rlz.shape_like(self.arg, dtype=dtype)
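# Illustrative sketch (hypothetical int32 column ``t.n``): constructing the
# op directly shows how the output dtype is derived from the input:
#
#   expr = IntervalFromInteger(t.n, 's').to_expr()
#   expr.type()  # Interval(unit='s', value_type=int32)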
class ArrayColumn(ValueOp):
cols = Arg(rlz.list_of(rlz.column(rlz.any), min_length=1))
def _validate(self):
if len({col.type() for col in self.cols}) > 1:
raise com.IbisTypeError(
f'The types of all input columns must match exactly in a '
f'{type(self).__name__} operation.'
)
def output_type(self):
first_dtype = self.cols[0].type()
return dt.Array(first_dtype).column_type()
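# Illustrative sketch (hypothetical float64 columns ``t.a`` and ``t.b``):
#
#   ArrayColumn([t.a, t.b]).to_expr().type()  # Array(float64)
#
# Mixing column types, e.g. [t.a, t.s] with ``t.s`` a string column, raises
# IbisTypeError in _validate above.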
class ArrayLength(UnaryOp):
arg = Arg(rlz.array)
output_type = rlz.shape_like('arg', dt.int64)
class ArraySlice(ValueOp):
arg = Arg(rlz.array)
start = Arg(rlz.integer)
stop = Arg(rlz.integer, default=None)
output_type = rlz.typeof('arg')
class ArrayIndex(ValueOp):
arg = Arg(rlz.array)
index = Arg(rlz.integer)
def output_type(self):
value_dtype = self.arg.type().value_type
return rlz.shape_like(self.arg, value_dtype)
class ArrayConcat(ValueOp):
left = Arg(rlz.array)
right = Arg(rlz.array)
output_type = rlz.shape_like('left')
def _validate(self):
left_dtype, right_dtype = self.left.type(), self.right.type()
if left_dtype != right_dtype:
raise com.IbisTypeError(
'Array types must match exactly in a {} operation. '
'Left type {} != Right type {}'.format(
type(self).__name__, left_dtype, right_dtype
)
)
class ArrayRepeat(ValueOp):
arg = Arg(rlz.array)
times = Arg(rlz.integer)
output_type = rlz.typeof('arg')
class ArrayCollect(Reduction):
arg = Arg(rlz.column(rlz.any))
def output_type(self):
dtype = dt.Array(self.arg.type())
return dtype.scalar_type()
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
class StructField(ValueOp):
arg = Arg(rlz.struct)
field = Arg(str)
def output_type(self):
struct_dtype = self.arg.type()
value_dtype = struct_dtype[self.field]
return rlz.shape_like(self.arg, value_dtype)
class Literal(ValueOp):
value = Arg(rlz.noop)
dtype = Arg(dt.dtype)
def __repr__(self):
return '{}({})'.format(
type(self).__name__, ', '.join(map(repr, self.args))
)
def equals(self, other, cache=None):
# Check types
if not (
isinstance(other, Literal)
and isinstance(other.value, type(self.value))
and self.dtype == other.dtype
):
return False
# Check values
if isinstance(self.value, np.ndarray):
return np.array_equal(self.value, other.value)
else:
return self.value == other.value
def output_type(self):
return self.dtype.scalar_type()
def root_tables(self):
return []
def __hash__(self) -> int:
"""Return the hash of a literal value.
We override this method to make sure that we can handle things that
aren't eminently hashable like an ``array<array<int64>>``.
"""
return hash(self.dtype._literal_value_hash_key(self.value))
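# Illustrative note: the dtype participates in literal equality, so two
# literals with the same Python value but different inferred types are not
# equal:
#
#   Literal(5, dt.int8).equals(Literal(5, dt.int64))  # False
#   Literal(5, dt.int8).equals(Literal(5, dt.int8))   # True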
class NullLiteral(Literal):
"""Typeless NULL literal"""
value = Arg(type(None), default=None)
dtype = Arg(dt.Null, default=dt.null)
class ScalarParameter(ValueOp):
_counter = itertools.count()
dtype = Arg(dt.dtype)
counter = Arg(int, default=lambda: next(ScalarParameter._counter))
def resolve_name(self):
return 'param_{:d}'.format(self.counter)
def __repr__(self):
return '{}(type={})'.format(type(self).__name__, self.dtype)
def __hash__(self):
return hash((self.dtype, self.counter))
def output_type(self):
return self.dtype.scalar_type()
def equals(self, other, cache=None):
return (
isinstance(other, ScalarParameter)
and self.counter == other.counter
and self.dtype.equals(other.dtype, cache=cache)
)
@property
def inputs(self):
return ()
def root_tables(self):
return []
class ExpressionList(Node):
"""Data structure for a list of arbitrary expressions"""
exprs = Arg(rlz.noop)
def __init__(self, values):
super().__init__(list(map(rlz.any, values)))
@property
def inputs(self):
return (tuple(self.exprs),)
def root_tables(self):
return distinct_roots(self.exprs)
def output_type(self):
return ir.ExprList
class ValueList(ValueOp):
"""Data structure for a list of value expressions"""
values = Arg(rlz.noop)
display_argnames = False # disable showing argnames in repr
def __init__(self, values):
super().__init__(tuple(map(rlz.any, values)))
def output_type(self):
dtype = rlz.highest_precedence_dtype(self.values)
return functools.partial(ir.ListExpr, dtype=dtype)
def root_tables(self):
return distinct_roots(*self.values)
# ----------------------------------------------------------------------
# GeoSpatial operations
class GeoSpatialBinOp(BinaryOp):
"""Geo Spatial base binary"""
left = Arg(rlz.geospatial)
right = Arg(rlz.geospatial)
class GeoSpatialUnOp(UnaryOp):
"""Geo Spatial base unary"""
arg = Arg(rlz.geospatial)
class GeoDistance(GeoSpatialBinOp):
"""Returns minimum distance between two geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoContains(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoContainsProperly(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one,
and no boundary points are shared."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCovers(GeoSpatialBinOp):
"""Returns True if no point in Geometry B is outside Geometry A"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCoveredBy(GeoSpatialBinOp):
"""Returns True if no point in Geometry/Geography A is
outside Geometry/Geography B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCrosses(GeoSpatialBinOp):
"""Returns True if the supplied geometries have some, but not all,
interior points in common."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoDisjoint(GeoSpatialBinOp):
"""Returns True if the Geometries do not “spatially intersect” -
if they do not share any space together."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoEquals(GeoSpatialBinOp):
"""Returns True if the given geometries represent the same geometry."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoGeometryN(GeoSpatialUnOp):
"""Returns the Nth Geometry of a Multi geometry."""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoGeometryType(GeoSpatialUnOp):
"""Returns the type of the geometry."""
output_type = rlz.shape_like('args', dt.string)
class GeoIntersects(GeoSpatialBinOp):
"""Returns True if the Geometries/Geography “spatially intersect in 2D”
- (share any portion of space) and False if they don’t (they are Disjoint).
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIsValid(GeoSpatialUnOp):
"""Returns true if the geometry is well-formed."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoLineLocatePoint(GeoSpatialBinOp):
"""
Locate the distance a point falls along the length of a line.
Returns a float between zero and one representing the location of the
closest point on the linestring to the given point, as a fraction of the
total 2d line length.
"""
left = Arg(rlz.linestring)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.halffloat)
class GeoLineMerge(GeoSpatialUnOp):
"""
Merge a MultiLineString into a LineString.
Returns a (set of) LineString(s) formed by sewing together the
constituent line work of a multilinestring. If a geometry other than
a linestring or multilinestring is given, this will return an empty
geometry collection.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoLineSubstring(GeoSpatialUnOp):
"""
Clip a substring from a LineString.
Returns a linestring that is a substring of the input one, starting
and ending at the given fractions of the total 2d length. The second
and third arguments are floating point values between zero and one.
This only works with linestrings.
"""
arg = Arg(rlz.linestring)
start = Arg(rlz.floating)
end = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.linestring)
class GeoOrderingEquals(GeoSpatialBinOp):
"""
Check if two geometries are equal and have the same point ordering.
Returns true if the two geometries are equal and the coordinates
are in the same order.
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoOverlaps(GeoSpatialBinOp):
"""Returns True if the Geometries share space, are of the same dimension,
but are not completely contained by each other."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoTouches(GeoSpatialBinOp):
"""Returns True if the geometries have at least one point in common,
but their interiors do not intersect."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoUnaryUnion(Reduction):
"""Returns the pointwise union of the geometries in the column."""
arg = Arg(rlz.column(rlz.geospatial))
def output_type(self):
return dt.geometry.scalar_type()
class GeoUnion(GeoSpatialBinOp):
"""Returns the pointwise union of the two geometries."""
output_type = rlz.shape_like('args', dt.geometry)
class GeoArea(GeoSpatialUnOp):
"""Area of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoPerimeter(GeoSpatialUnOp):
"""Perimeter of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoLength(GeoSpatialUnOp):
"""Length of geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoMaxDistance(GeoSpatialBinOp):
"""Returns the 2-dimensional maximum distance between two geometries in
    projected units. If g1 and g2 are the same geometry, the function
    returns the distance between the two vertices most distant from each
    other in that geometry.
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoX(GeoSpatialUnOp):
"""Return the X coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoY(GeoSpatialUnOp):
"""Return the Y coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMax(GeoSpatialUnOp):
"""Returns X maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMax(GeoSpatialUnOp):
"""Returns Y maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoStartPoint(GeoSpatialUnOp):
"""Returns the first point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoEndPoint(GeoSpatialUnOp):
"""Returns the last point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoPoint(GeoSpatialBinOp):
"""
Return a point constructed on the fly from the provided coordinate values.
Constant coordinates result in construction of a POINT literal.
"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.point)
class GeoPointN(GeoSpatialUnOp):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
"""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.point)
class GeoNPoints(GeoSpatialUnOp):
"""Return the number of points in a geometry. Works for all geometries"""
output_type = rlz.shape_like('args', dt.int64)
class GeoNRings(GeoSpatialUnOp):
"""If the geometry is a polygon or multi-polygon returns the number of
rings. It counts the outer rings as well
"""
output_type = rlz.shape_like('args', dt.int64)
class GeoSRID(GeoSpatialUnOp):
"""Returns the spatial reference identifier for the ST_Geometry."""
output_type = rlz.shape_like('args', dt.int64)
class GeoSetSRID(GeoSpatialUnOp):
"""Set the spatial reference identifier for the ST_Geometry."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoBuffer(GeoSpatialUnOp):
"""Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry.
"""
radius = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.geometry)
class GeoCentroid(GeoSpatialUnOp):
"""Returns the geometric center of a geometry."""
output_type = rlz.shape_like('arg', dt.point)
class GeoDFullyWithin(GeoSpatialBinOp):
"""Returns True if the geometries are fully within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoDWithin(GeoSpatialBinOp):
"""Returns True if the geometries are within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoEnvelope(GeoSpatialUnOp):
"""Returns a geometry representing the boundingbox of the supplied geometry.
"""
output_type = rlz.shape_like('arg', dt.polygon)
class GeoAzimuth(GeoSpatialBinOp):
"""Returns the angle in radians from the horizontal of the vector defined
by pointA and pointB. Angle is computed clockwise from down-to-up:
on the clock: 12=0; 3=PI/2; 6=PI; 9=3PI/2.
"""
left = Arg(rlz.point)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.float64)
class GeoWithin(GeoSpatialBinOp):
"""Returns True if the geometry A is completely inside geometry B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIntersection(GeoSpatialBinOp):
"""Returns a geometry that represents the point set intersection
of the Geometries.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoDifference(GeoSpatialBinOp):
"""Returns a geometry that represents that part of geometry A
that does not intersect with geometry B
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoSimplify(GeoSpatialUnOp):
"""Returns a simplified version of the given geometry."""
tolerance = Arg(rlz.floating)
preserve_collapsed = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoTransform(GeoSpatialUnOp):
"""Returns a transformed version of the given geometry into a new SRID."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoAsBinary(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography without SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKB(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKT(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.string)
class GeoAsText(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography without SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.string)
class ElementWiseVectorizedUDF(ValueOp):
"""Node for element wise UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ReductionVectorizedUDF(Reduction):
"""Node for reduction UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.scalar_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class AnalyticVectorizedUDF(AnalyticOp):
"""Node for analytics UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
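# Illustrative sketch (hypothetical double column ``t.a``): these UDF nodes
# are normally built by the vectorized-UDF decorators rather than by hand,
# but constructing one directly shows the shape of the arguments:
#
#   node = ElementWiseVectorizedUDF(
#       func=lambda s: s + 1.0,
#       args=(t.a,),
#       input_type=[dt.double],
#       output_type=dt.double,
#   )
#   node.to_expr()  # a float64 column expression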
class ExistsSubquery(Node):
"""Helper class"""
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class NotExistsSubquery(Node):
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class Mean(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type()
else:
dtype = dt.float64
return dtype.scalar_type()
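# Usage sketch (illustrative comment; the table is an assumption). Reductions
# such as Sum and Mean take an optional ``where`` predicate that filters rows
# before aggregating, e.g.:
#
#   >>> import ibis
#   >>> t = ibis.table([('x', 'int64'), ('b', 'boolean')], name='t')
#   >>> total = t.x.sum(where=t.b)  # Sum over rows where b is true
#   >>> avg = t.x.mean()            # Mean; float64 unless arg is decimal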
class Quantile(Reduction):
arg = Arg(rlz.any)
quantile = Arg(rlz.strict_numeric)
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.float64.scalar_type()
class MultiQuantile(Quantile):
arg = Arg(rlz.any)
quantile = Arg(rlz.value(dt.Array(dt.float64)))
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.Array(dt.float64).scalar_type()
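# Usage sketch (illustrative comment; reusing ``t`` from the sketch above).
# A scalar quantile builds Quantile; a list of quantiles builds MultiQuantile
# and yields an array scalar:
#
#   >>> med = t.x.quantile(0.5)
#   >>> qs = t.x.quantile([0.25, 0.5, 0.75])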
class VarianceBase(Reduction):
arg = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.scalar_type()
class StandardDev(VarianceBase):
pass
class Variance(VarianceBase):
pass
class Correlation(Reduction):
"""Coefficient of correlation of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Covariance(Reduction):
"""Covariance of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Max(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Min(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class HLLCardinality(Reduction):
"""Approximate number of unique values using HyperLogLog algorithm.
Impala offers the NDV built-in function for this.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
# Impala 2.0 and higher returns a DOUBLE
# return ir.DoubleScalar
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class GroupConcat(Reduction):
arg = Arg(rlz.column(rlz.any))
sep = Arg(rlz.string, default=',')
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.string.scalar_type()
class CMSMedian(Reduction):
"""
Compute the approximate median of a set of comparable values using the
Count-Min-Sketch algorithm. Exposed in Impala using APPX_MEDIAN.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
# ----------------------------------------------------------------------
# Analytic functions
class AnalyticOp(ValueOp):
pass
class WindowOp(ValueOp):
expr = Arg(rlz.noop)
window = Arg(rlz.noop)
output_type = rlz.array_like('expr')
display_argnames = False
def __init__(self, expr, window):
from ibis.expr.analysis import is_analytic
from ibis.expr.window import propagate_down_window
if not is_analytic(expr):
raise com.IbisInputError(
'Expression does not contain a valid window operation'
)
table = ir.find_base_table(expr)
if table is not None:
window = window.bind(table)
if window.max_lookback is not None:
error_msg = (
"'max lookback' windows must be ordered "
"by a timestamp column"
)
if len(window._order_by) != 1:
raise com.IbisInputError(error_msg)
order_var = window._order_by[0].op().args[0]
if not isinstance(order_var.type(), dt.Timestamp):
raise com.IbisInputError(error_msg)
expr = propagate_down_window(expr, window)
super().__init__(expr, window)
def over(self, window):
new_window = self.window.combine(window)
return WindowOp(self.expr, new_window)
@property
def inputs(self):
return self.expr.op().inputs[0], self.window
def root_tables(self):
return distinct_roots(
self.expr, *self.window._order_by, *self.window._group_by
)
class ShiftBase(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
offset = Arg(rlz.one_of((rlz.integer, rlz.interval)), default=None)
default = Arg(rlz.any, default=None)
output_type = rlz.typeof('arg')
class Lag(ShiftBase):
pass
class Lead(ShiftBase):
pass
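# Usage sketch (illustrative comment; the table is an assumption). Lag and
# Lead shift a column by an offset and fill missing slots with ``default``:
#
#   >>> import ibis
#   >>> t = ibis.table([('x', 'int64')], name='t')
#   >>> prev = t.x.lag()              # offset omitted: backend default of 1
#   >>> nxt = t.x.lead(2, default=0)  # look two rows ahead, fill with 0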
class RankBase(AnalyticOp):
def output_type(self):
return dt.int64.column_type()
class MinRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order.
Examples
--------
values ranks
1 0
1 0
2 2
2 2
2 2
3 5
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL RANK()
arg = Arg(rlz.column(rlz.any))
class DenseRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order, ignoring duplicate values.
Examples
--------
values ranks
1 0
1 0
2 1
2 1
2 1
3 2
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL DENSE_RANK()
arg = Arg(rlz.column(rlz.any))
class RowNumber(RankBase):
"""
Compute row number starting from 0 after sorting by column expression
Examples
--------
>>> import ibis
>>> t = ibis.table([('values', dt.int64)])
>>> w = ibis.window(order_by=t.values)
>>> row_num = ibis.row_number().over(w)
>>> result = t[t.values, row_num.name('row_num')]
Returns
-------
row_number : Int64Column, starting from 0
"""
# Equivalent to SQL ROW_NUMBER()
class CumulativeOp(AnalyticOp):
pass
class CumulativeSum(CumulativeOp):
"""Cumulative sum. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.column_type()
class CumulativeMean(CumulativeOp):
"""Cumulative mean. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.column_type()
class CumulativeMax(CumulativeOp):
"""Cumulative max. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class CumulativeMin(CumulativeOp):
"""Cumulative min. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class PercentRank(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.shape_like('arg', dt.double)
class NTile(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
buckets = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.int64)
class FirstValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class LastValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class NthValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
nth = Arg(rlz.integer)
output_type = rlz.typeof('arg')
# ----------------------------------------------------------------------
# Distinct stuff
class Distinct(TableNode, HasSchema):
"""
Distinct is a table-level unique-ing operation.
In SQL, you might have:
SELECT DISTINCT foo
FROM table
SELECT DISTINCT foo, bar
FROM table
"""
table = Arg(ir.TableExpr)
def _validate(self):
# check whether schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.table.schema()
def blocks(self):
return True
class DistinctColumn(ValueOp):
"""
    COUNT(DISTINCT ...) is really just syntactic sugar, but we provide a
    distinct().count() nicety for users nonetheless.
    For all intents and purposes like Distinct, but can be distinguished
    later during evaluation if the result should be array-like versus
    table-like. Also used for calling count().
"""
arg = Arg(rlz.noop)
output_type = rlz.typeof('arg')
def count(self):
"""Only valid if the distinct contains a single column"""
return CountDistinct(self.arg)
class CountDistinct(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.int64.scalar_type()
# ---------------------------------------------------------------------
# Boolean reductions and semi/anti join support
class Any(ValueOp):
# Depending on the kind of input boolean array, the result might either be
# array-like (an existence-type predicate) or scalar (a reduction)
arg = Arg(rlz.column(rlz.boolean))
@property
def _reduction(self):
roots = self.arg.op().root_tables()
return len(roots) < 2
def output_type(self):
if self._reduction:
return dt.boolean.scalar_type()
else:
return dt.boolean.column_type()
def negate(self):
return NotAny(self.arg)
class All(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.scalar_like('arg')
_reduction = True
def negate(self):
return NotAll(self.arg)
class NotAny(Any):
def negate(self):
return Any(self.arg)
class NotAll(All):
def negate(self):
return All(self.arg)
class CumulativeAny(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class CumulativeAll(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
# ---------------------------------------------------------------------
class TypedCaseBuilder:
__slots__ = ()
def type(self):
types = [result.type() for result in self.results]
return dt.highest_precedence(types)
def else_(self, result_expr):
"""
        Specify the default result when no case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = ir.as_value_expr(result_expr)
kwargs['default'] = result_expr
# Maintain immutability
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
class SimpleCase(ValueOp):
base = Arg(rlz.any)
cases = Arg(rlz.list_of(rlz.any))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
return distinct_roots(
*itertools.chain(
[self.base],
self.cases,
self.results,
[] if self.default is None else [self.default],
)
)
def output_type(self):
exprs = self.results + [self.default]
return rlz.shape_like(self.base, dtype=exprs.type())
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
                'Base expression and passed case are not comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(self.base, cases, results, self.default)
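# Usage sketch (illustrative comment; the table is an assumption). The
# builder above backs the value-level case API:
#
#   >>> import ibis
#   >>> t = ibis.table([('x', 'int64')], name='t')
#   >>> expr = (t.x.case()
#   ...         .when(1, 'one')
#   ...         .when(2, 'two')
#   ...         .else_('other')
#   ...         .end())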
class SearchedCase(ValueOp):
cases = Arg(rlz.list_of(rlz.boolean))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
cases, results, default = self.args
return distinct_roots(
*itertools.chain(
cases.values,
results.values,
[] if default is None else [default],
)
)
def output_type(self):
exprs = self.results + [self.default]
dtype = rlz.highest_precedence_dtype(exprs)
return rlz.shape_like(self.cases, dtype)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
    def when(self, case_expr, result_expr):
        """
        Add a new case-result pair.
        Parameters
        ----------
        case : Expr
            Boolean expression; when it evaluates to true, ``result`` is
            returned.
        result : Expr
            Value when the case predicate evaluates to true.
        Returns
        -------
        builder : CaseBuilder
        """
        case_expr = ir.as_value_expr(case_expr)
        result_expr = ir.as_value_expr(result_expr)
        if not isinstance(case_expr, ir.BooleanValue):
            raise TypeError(case_expr)
        cases = list(self.cases)
        cases.append(case_expr)
        results = list(self.results)
        results.append(result_expr)
        # Maintain immutability
        return type(self)(cases, results, self.default)
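# Usage sketch (illustrative comment; reusing ``t`` from the sketch above).
# The searched variant takes boolean predicates instead of values compared
# against a base:
#
#   >>> expr = (ibis.case()
#   ...         .when(t.x > 0, 'positive')
#   ...         .when(t.x < 0, 'negative')
#   ...         .else_('zero')
#   ...         .end())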
class Where(ValueOp):
"""
Ternary case expression, equivalent to
bool_expr.case()
.when(True, true_expr)
.else_(false_or_null_expr)
"""
bool_expr = Arg(rlz.boolean)
true_expr = Arg(rlz.any)
false_null_expr = Arg(rlz.any)
def output_type(self):
return rlz.shape_like(self.bool_expr, self.true_expr.type())
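# Usage sketch (illustrative comment; reusing ``t`` from the sketches above).
# Where backs the ``ifelse`` method on boolean expressions:
#
#   >>> expr = (t.x > 0).ifelse('pos', 'non-pos')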
def _validate_join_tables(left, right):
if not isinstance(left, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'left table'.format(type(left).__name__)
)
if not isinstance(right, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'right table'.format(type(right).__name__)
)
def _make_distinct_join_predicates(left, right, predicates):
# see GH #667
# If left and right table have a common parent expression (e.g. they
# have different filters), must add a self-reference and make the
# appropriate substitution in the join predicates
if left.equals(right):
right = right.view()
predicates = _clean_join_predicates(left, right, predicates)
return left, right, predicates
def _clean_join_predicates(left, right, predicates):
import ibis.expr.analysis as L
result = []
if not isinstance(predicates, (list, tuple)):
predicates = [predicates]
for pred in predicates:
if isinstance(pred, tuple):
if len(pred) != 2:
                raise com.ExpressionError('Join key tuple must have length 2')
lk, rk = pred
lk = left._ensure_expr(lk)
rk = right._ensure_expr(rk)
pred = lk == rk
elif isinstance(pred, str):
pred = left[pred] == right[pred]
elif not isinstance(pred, ir.Expr):
raise NotImplementedError
if not isinstance(pred, ir.BooleanColumn):
raise com.ExpressionError('Join predicate must be comparison')
preds = L.flatten_predicate(pred)
result.extend(preds)
_validate_join_predicates(left, right, result)
return result
def _validate_join_predicates(left, right, predicates):
from ibis.expr.analysis import fully_originate_from
# Validate join predicates. Each predicate must be valid jointly when
# considering the roots of each input table
for predicate in predicates:
if not fully_originate_from(predicate, [left, right]):
raise com.RelationError(
'The expression {!r} does not fully '
'originate from dependencies of the table '
'expression.'.format(predicate)
)
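# Usage sketch (illustrative comment; both tables are assumptions). Join
# predicates may be boolean expressions, shared column names, or two-tuples
# of left/right keys, all normalized by _clean_join_predicates above:
#
#   >>> import ibis
#   >>> t1 = ibis.table([('key', 'string'), ('v', 'int64')], name='t1')
#   >>> t2 = ibis.table([('key', 'string'), ('w', 'int64')], name='t2')
#   >>> j1 = t1.inner_join(t2, t1.key == t2.key)
#   >>> j2 = t1.inner_join(t2, [('key', 'key')])  # tuple form, same result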
class Join(TableNode):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def __init__(self, left, right, predicates):
_validate_join_tables(left, right)
left, right, predicates = _make_distinct_join_predicates(
left, right, predicates
)
super().__init__(left, right, predicates)
def _get_schema(self):
# For joins retaining both table schemas, merge them together here
left = self.left
right = self.right
if not left._is_materialized():
left = left.materialize()
if not right._is_materialized():
right = right.materialize()
sleft = left.schema()
sright = right.schema()
overlap = set(sleft.names) & set(sright.names)
if overlap:
raise com.RelationError(
'Joined tables have overlapping names: %s' % str(list(overlap))
)
return sleft.append(sright)
def has_schema(self):
return False
def root_tables(self):
if util.all_of([self.left.op(), self.right.op()], (Join, Selection)):
# Unraveling is not possible
return [self.left.op(), self.right.op()]
else:
return distinct_roots(self.left, self.right)
class InnerJoin(Join):
pass
class LeftJoin(Join):
pass
class RightJoin(Join):
pass
class OuterJoin(Join):
pass
class AnyInnerJoin(Join):
pass
class AnyLeftJoin(Join):
pass
class LeftSemiJoin(Join):
def _get_schema(self):
return self.left.schema()
class LeftAntiJoin(Join):
def _get_schema(self):
return self.left.schema()
class MaterializedJoin(TableNode, HasSchema):
join = Arg(ir.TableExpr)
def _validate(self):
assert isinstance(self.join.op(), Join)
# check whether the underlying schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.join.op()._get_schema()
def root_tables(self):
return self.join.op().root_tables()
def blocks(self):
return True
class CrossJoin(InnerJoin):
"""
Some databases have a CROSS JOIN operator, that may be preferential to use
over an INNER JOIN with no predicates.
"""
def __init__(self, *args, **kwargs):
if 'prefixes' in kwargs:
raise NotImplementedError
if len(args) < 2:
raise com.IbisInputError('Must pass at least 2 tables')
left = args[0]
right = args[1]
for t in args[2:]:
right = right.cross_join(t)
InnerJoin.__init__(self, left, right, [])
class AsOfJoin(Join):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
by = Arg(rlz.noop, default=None)
tolerance = Arg(rlz.interval(), default=None)
def __init__(self, left, right, predicates, by, tolerance):
super().__init__(left, right, predicates)
self.by = _clean_join_predicates(self.left, self.right, by)
self.tolerance = tolerance
self._validate_args(['by', 'tolerance'])
def _validate_args(self, args: List[str]):
for arg in args:
argument = self.signature[arg]
value = argument.validate(getattr(self, arg))
setattr(self, arg, value)
class SetOp(TableNode, HasSchema):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
def _validate(self):
if not self.left.schema().equals(self.right.schema()):
raise com.RelationError(
'Table schemas must be equal for set operations'
)
@cached_property
def schema(self):
return self.left.schema()
def blocks(self):
return True
class Union(SetOp):
distinct = Arg(rlz.validator(bool), default=False)
class Intersection(SetOp):
pass
class Difference(SetOp):
pass
class Limit(TableNode):
table = Arg(ir.TableExpr)
n = Arg(rlz.validator(int))
offset = Arg(rlz.validator(int))
def blocks(self):
return True
@property
def schema(self):
return self.table.schema()
def has_schema(self):
return self.table.op().has_schema()
def root_tables(self):
return [self]
# --------------------------------------------------------------------
# Sorting
def to_sort_key(table, key):
if isinstance(key, DeferredSortKey):
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return to_sort_key(table, key)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
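# Usage sketch (illustrative comment; reusing ``t`` from earlier sketches).
# Sort keys may be column names, expressions, (key, ascending) tuples, or
# deferred keys from ibis.desc; string orders like 'desc' are normalized in
# to_sort_key above:
#
#   >>> by_x_desc = t.sort_by([('x', False)])
#   >>> same = t.sort_by(ibis.desc('x'))  # DeferredSortKey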
class SortKey(Node):
expr = Arg(rlz.column(rlz.any))
ascending = Arg(rlz.validator(bool), default=True)
def __repr__(self):
# Temporary
rows = [
'Sort key:',
' ascending: {0!s}'.format(self.ascending),
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
# TODO: might generalize this equals based on fields
# requires a proxy class with equals for non expr values
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
class SelfReference(TableNode, HasSchema):
table = Arg(ir.TableExpr)
@cached_property
def schema(self):
return self.table.schema()
def root_tables(self):
# The dependencies of this operation are not walked, which makes the
# table expression holding this relationally distinct from other
# expressions, so things like self-joins are possible
return [self]
def blocks(self):
return True
class Selection(TableNode, HasSchema):
table = Arg(ir.TableExpr)
selections = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self, table, selections=None, predicates=None, sort_keys=None
):
import ibis.expr.analysis as L
# Argument cleaning
selections = util.promote_list(
selections if selections is not None else []
)
projections = []
for selection in selections:
if isinstance(selection, str):
projection = table[selection]
else:
projection = selection
projections.append(projection)
sort_keys = [
to_sort_key(table, k)
for k in util.promote_list(
sort_keys if sort_keys is not None else []
)
]
predicates = list(
toolz.concat(
map(
L.flatten_predicate,
predicates if predicates is not None else [],
)
)
)
super().__init__(
table=table,
selections=projections,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator
# Need to validate that the column expressions are compatible with the
# input table; this means they must either be scalar expressions or
# array expressions originating from the same root table expression
dependent_exprs = self.selections + self.sort_keys
self.table._assert_valid(dependent_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate no overlapping columns in schema
assert self.schema
@cached_property
def schema(self):
# Resolve schema and initialize
if not self.selections:
return self.table.schema()
types = []
names = []
for projection in self.selections:
if isinstance(projection, ir.DestructColumn):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = projection.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
elif isinstance(projection, ir.ValueExpr):
names.append(projection.get_name())
types.append(projection.type())
elif isinstance(projection, ir.TableExpr):
schema = projection.schema()
names.extend(schema.names)
types.extend(schema.types)
return Schema(names, types)
def blocks(self):
return bool(self.selections)
def substitute_table(self, table_expr):
return Selection(table_expr, self.selections)
def root_tables(self):
return [self]
def can_add_filters(self, wrapped_expr, predicates):
pass
@staticmethod
def empty_or_equal(lefts, rights):
return not lefts or not rights or all_equal(lefts, rights)
def compatible_with(self, other):
        # self and other are equivalent except for predicates, selections,
        # or sort keys, any of which is allowed to be empty. If both are
        # non-empty, they must be equal.
if self.equals(other):
return True
if not isinstance(other, type(self)):
return False
return self.table.equals(other.table) and (
self.empty_or_equal(self.predicates, other.predicates)
and self.empty_or_equal(self.selections, other.selections)
and self.empty_or_equal(self.sort_keys, other.sort_keys)
)
# Operator combination / fusion logic
def aggregate(self, this, metrics, by=None, having=None):
if len(self.selections) > 0:
return Aggregation(this, metrics, by=by, having=having)
else:
helper = AggregateSelection(this, metrics, by, having)
return helper.get_result()
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
if not self.blocks():
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Selection(
self.table,
self.selections,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class AggregateSelection:
# sort keys cannot be discarded because of order-dependent
# aggregate functions like GROUP_CONCAT
def __init__(self, parent, metrics, by, having):
self.parent = parent
self.op = parent.op()
self.metrics = metrics
self.by = by
self.having = having
def get_result(self):
if self.op.blocks():
return self._plain_subquery()
else:
return self._attempt_pushdown()
def _plain_subquery(self):
return Aggregation(
self.parent, self.metrics, by=self.by, having=self.having
)
def _attempt_pushdown(self):
metrics_valid, lowered_metrics = self._pushdown_exprs(self.metrics)
by_valid, lowered_by = self._pushdown_exprs(self.by)
having_valid, lowered_having = self._pushdown_exprs(
self.having or None
)
if metrics_valid and by_valid and having_valid:
return Aggregation(
self.op.table,
lowered_metrics,
by=lowered_by,
having=lowered_having,
predicates=self.op.predicates,
sort_keys=self.op.sort_keys,
)
else:
return self._plain_subquery()
def _pushdown_exprs(self, exprs):
import ibis.expr.analysis as L
if exprs is None:
return True, []
resolved = self.op.table._resolve(exprs)
subbed_exprs = []
valid = False
if resolved:
for x in util.promote_list(resolved):
subbed = L.sub_for(x, [(self.parent, self.op.table)])
subbed_exprs.append(subbed)
valid = self.op.table._is_valid(subbed_exprs)
else:
valid = False
return valid, subbed_exprs
def _maybe_convert_sort_keys(table, exprs):
try:
return [to_sort_key(table, k) for k in util.promote_list(exprs)]
except com.IbisError:
return None
class Aggregation(TableNode, HasSchema):
"""
metrics : per-group scalar aggregates
by : group expressions
having : post-aggregation predicate
TODO: not putting this in the aggregate operation yet
where : pre-aggregation predicate
"""
table = Arg(ir.TableExpr)
metrics = Arg(rlz.noop)
by = Arg(rlz.noop)
having = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self,
table,
metrics,
by=None,
having=None,
predicates=None,
sort_keys=None,
):
# For tables, like joins, that are not materialized
metrics = self._rewrite_exprs(table, metrics)
by = [] if by is None else by
by = table._resolve(by)
having = [] if having is None else having
predicates = [] if predicates is None else predicates
# order by only makes sense with group by in an aggregation
sort_keys = [] if not by or sort_keys is None else sort_keys
sort_keys = [
to_sort_key(table, k) for k in util.promote_list(sort_keys)
]
by = self._rewrite_exprs(table, by)
having = self._rewrite_exprs(table, having)
predicates = self._rewrite_exprs(table, predicates)
sort_keys = self._rewrite_exprs(table, sort_keys)
super().__init__(
table=table,
metrics=metrics,
by=by,
having=having,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator, is_reduction
# All aggregates are valid
for expr in self.metrics:
if not isinstance(expr, ir.ScalarExpr) or not is_reduction(expr):
raise TypeError(
'Passed a non-aggregate expression: %s' % _safe_repr(expr)
)
for expr in self.having:
if not isinstance(expr, ir.BooleanScalar):
raise com.ExpressionError(
                    'Having clause must be a boolean '
'expression, was: {0!s}'.format(_safe_repr(expr))
)
# All non-scalar refs originate from the input table
all_exprs = self.metrics + self.by + self.having + self.sort_keys
self.table._assert_valid(all_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate schema has no overlapping columns
assert self.schema
def _rewrite_exprs(self, table, what):
what = util.promote_list(what)
all_exprs = []
for expr in what:
if isinstance(expr, ir.ExprList):
all_exprs.extend(expr.exprs())
else:
bound_expr = ir.bind_expr(table, expr)
all_exprs.append(bound_expr)
return all_exprs
# TODO - #2832
# this optimization becomes O(n^2) when it calls into
# _lift_TableColumn in analysis.py, which itself is O(n) and is
# called on each input to the aggregation - thus creating the
# aggregation expression can be extremely slow on wide tables
# that contain a Selection.
# return [
# substitute_parents(x, past_projection=False) for x in all_exprs
# ]
def blocks(self):
return True
def substitute_table(self, table_expr):
return Aggregation(
table_expr, self.metrics, by=self.by, having=self.having
)
@cached_property
def schema(self):
names = []
types = []
for e in self.by + self.metrics:
if isinstance(e, ir.DestructValue):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = e.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
else:
names.append(e.get_name())
types.append(e.type())
return Schema(names, types)
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Aggregation(
self.table,
self.metrics,
by=self.by,
having=self.having,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class NumericBinaryOp(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
class Add(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.add)
class Multiply(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mul)
class Power(NumericBinaryOp):
def output_type(self):
if util.all_of(self.args, ir.IntegerValue):
return rlz.shape_like(self.args, dt.float64)
else:
return rlz.shape_like(self.args)
class Subtract(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.sub)
class Divide(NumericBinaryOp):
output_type = rlz.shape_like('args', dt.float64)
class FloorDivide(Divide):
output_type = rlz.shape_like('args', dt.int64)
class LogicalBinaryOp(BinaryOp):
left = Arg(rlz.boolean)
right = Arg(rlz.boolean)
output_type = rlz.shape_like('args', dt.boolean)
class Not(UnaryOp):
arg = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.boolean)
class Modulus(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mod)
class And(LogicalBinaryOp):
pass
class Or(LogicalBinaryOp):
pass
class Xor(LogicalBinaryOp):
pass
class Comparison(BinaryOp, BooleanValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
def __init__(self, left, right):
"""
Casting rules for type promotions (for resolving the output type) may
depend in some cases on the target backend.
TODO: how will overflows be handled? Can we provide anything useful in
Ibis to help the user avoid them?
:param left:
:param right:
"""
super().__init__(*self._maybe_cast_args(left, right))
def _maybe_cast_args(self, left, right):
# it might not be necessary?
with suppress(com.IbisTypeError):
return left, rlz.cast(right, left)
with suppress(com.IbisTypeError):
return rlz.cast(left, right), right
return left, right
def output_type(self):
if not rlz.comparable(self.left, self.right):
raise TypeError(
'Arguments with datatype {} and {} are '
'not comparable'.format(self.left.type(), self.right.type())
)
return rlz.shape_like(self.args, dt.boolean)
class Equals(Comparison):
pass
class NotEquals(Comparison):
pass
class GreaterEqual(Comparison):
pass
class Greater(Comparison):
pass
class LessEqual(Comparison):
pass
class Less(Comparison):
pass
class IdenticalTo(Comparison):
pass
class Between(ValueOp, BooleanValueOp):
arg = Arg(rlz.any)
lower_bound = Arg(rlz.any)
upper_bound = Arg(rlz.any)
def output_type(self):
arg, lower, upper = self.args
if not (rlz.comparable(arg, lower) and rlz.comparable(arg, upper)):
raise TypeError('Arguments are not comparable')
return rlz.shape_like(self.args, dt.boolean)
class BetweenTime(Between):
arg = Arg(rlz.one_of([rlz.timestamp, rlz.time]))
lower_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
upper_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
class Contains(ValueOp, BooleanValueOp):
value = Arg(rlz.any)
options = Arg(
rlz.one_of(
[
rlz.list_of(rlz.any),
rlz.set_,
rlz.column(rlz.any),
rlz.array_of(rlz.any),
]
)
)
def __init__(self, value, options):
# it can be a single expression, like a column
if not isinstance(options, ir.Expr):
if util.any_of(options, ir.Expr):
# or a list of expressions
options = ir.sequence(options)
else:
# or a set of scalar values
options = frozenset(options)
super().__init__(value, options)
def output_type(self):
all_args = [self.value]
if isinstance(self.options, ir.ListExpr):
all_args += self.options
else:
all_args += [self.options]
return rlz.shape_like(all_args, dt.boolean)
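# Usage sketch (illustrative comment; the table is an assumption). Contains
# backs ``isin``/``notin``; options may be a literal sequence, a set, or
# another column:
#
#   >>> import ibis
#   >>> t = ibis.table([('x', 'int64'), ('y', 'int64')], name='t')
#   >>> small = t.x.isin([1, 2, 3])  # Contains
#   >>> rest = t.x.notin(t.y)        # NotContains against a column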
class NotContains(Contains):
pass
class ReplaceValues(ValueOp):
"""
    Apply a multi-value replacement on a particular column. As an example
    from SQL, given DAYOFWEEK(timestamp_col), replace 1 through 5 with
    "WEEKDAY" and 6 and 7 with "WEEKEND"
"""
pass
class SummaryFilter(ValueOp):
expr = Arg(rlz.noop)
def output_type(self):
return dt.boolean.column_type()
class TopK(ValueOp):
arg = Arg(rlz.noop)
k = Arg(int)
by = Arg(rlz.noop)
def __init__(self, arg, k, by=None):
if by is None:
by = arg.count()
if not isinstance(arg, ir.ColumnExpr):
raise TypeError(arg)
if not isinstance(k, int) or k < 0:
            raise ValueError(
                'k must be a non-negative integer, was: {0}'.format(k)
            )
super().__init__(arg, k, by)
def output_type(self):
return ir.TopKExpr
def blocks(self):
return True
class Constant(ValueOp):
pass
class TimestampNow(Constant):
def output_type(self):
return dt.timestamp.scalar_type()
class RandomScalar(Constant):
def output_type(self):
return dt.float64.scalar_type()
class E(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class Pi(Constant):
"""
The constant pi
"""
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class TemporalUnaryOp(UnaryOp):
arg = Arg(rlz.temporal)
class TimestampUnaryOp(UnaryOp):
arg = Arg(rlz.timestamp)
_date_units = {
'Y': 'Y',
'y': 'Y',
'year': 'Y',
'YEAR': 'Y',
'YYYY': 'Y',
'SYYYY': 'Y',
'YYY': 'Y',
'YY': 'Y',
'Q': 'Q',
'q': 'Q',
'quarter': 'Q',
'QUARTER': 'Q',
'M': 'M',
'month': 'M',
'MONTH': 'M',
'w': 'W',
'W': 'W',
'week': 'W',
'WEEK': 'W',
'd': 'D',
'D': 'D',
'J': 'D',
'day': 'D',
'DAY': 'D',
}
_time_units = {
'h': 'h',
'H': 'h',
'HH24': 'h',
'hour': 'h',
'HOUR': 'h',
'm': 'm',
'MI': 'm',
'minute': 'm',
'MINUTE': 'm',
's': 's',
'second': 's',
'SECOND': 's',
'ms': 'ms',
'millisecond': 'ms',
'MILLISECOND': 'ms',
'us': 'us',
    'microsecond': 'us',
    'MICROSECOND': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'NANOSECOND': 'ns',
}
_timestamp_units = toolz.merge(_date_units, _time_units)
class TimestampTruncate(ValueOp):
arg = Arg(rlz.timestamp)
unit = Arg(rlz.isin(_timestamp_units))
output_type = rlz.shape_like('arg', dt.timestamp)
class DateTruncate(ValueOp):
arg = Arg(rlz.date)
unit = Arg(rlz.isin(_date_units))
output_type = rlz.shape_like('arg', dt.date)
class TimeTruncate(ValueOp):
arg = Arg(rlz.time)
unit = Arg(rlz.isin(_time_units))
output_type = rlz.shape_like('arg', dt.time)
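# Usage sketch (illustrative comment; the table is an assumption). Truncation
# accepts any alias from the unit tables above:
#
#   >>> import ibis
#   >>> t = ibis.table([('ts', 'timestamp')], name='t')
#   >>> day = t.ts.truncate('D')  # 'day', 'DAY', ... also accepted
#   >>> hour = t.ts.truncate('h')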
class Strftime(ValueOp):
arg = Arg(rlz.temporal)
format_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringToTimestamp(ValueOp):
arg = Arg(rlz.string)
format_str = Arg(rlz.string)
timezone = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.Timestamp(timezone='UTC'))
class ExtractTemporalField(TemporalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
ExtractTimestampField = ExtractTemporalField
class ExtractDateField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
class ExtractTimeField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.time, rlz.timestamp]))
class ExtractYear(ExtractDateField):
pass
class ExtractMonth(ExtractDateField):
pass
class ExtractDay(ExtractDateField):
pass
class ExtractDayOfYear(ExtractDateField):
pass
class ExtractQuarter(ExtractDateField):
pass
class ExtractEpochSeconds(ExtractDateField):
pass
class ExtractWeekOfYear(ExtractDateField):
pass
class ExtractHour(ExtractTimeField):
pass
class ExtractMinute(ExtractTimeField):
pass
class ExtractSecond(ExtractTimeField):
pass
class ExtractMillisecond(ExtractTimeField):
pass
class DayOfWeekIndex(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.int16)
class DayOfWeekName(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.string)
class DayOfWeekNode(Node):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
def output_type(self):
return ir.DayOfWeek
class Time(UnaryOp):
output_type = rlz.shape_like('arg', dt.time)
class Date(UnaryOp):
output_type = rlz.shape_like('arg', dt.date)
class TimestampFromUNIX(ValueOp):
arg = Arg(rlz.any)
# Only pandas-based backends support 'ns'
unit = Arg(rlz.isin({'s', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('arg', dt.timestamp)
class DecimalUnaryOp(UnaryOp):
arg = Arg(rlz.decimal)
class DecimalPrecision(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class DecimalScale(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of({rlz.value(dt.string), rlz.value(dt.binary)}))
how = Arg(rlz.isin({'md5', 'sha1', 'sha256', 'sha512'}))
output_type = rlz.shape_like('arg', dt.binary)
class DateAdd(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateSub(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateDiff(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.date)
output_type = rlz.shape_like('left', dt.Interval('D'))
class TimeAdd(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeSub(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeDiff(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.time)
output_type = rlz.shape_like('left', dt.Interval('s'))
class TimestampAdd(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampSub(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampDiff(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(rlz.timestamp)
output_type = rlz.shape_like('left', dt.Interval('s'))
class IntervalBinaryOp(BinaryOp):
def output_type(self):
args = [
arg.cast(arg.type().value_type)
if isinstance(arg.type(), dt.Interval)
else arg
for arg in self.args
]
expr = rlz.numeric_like(args, self.__class__.op)(self)
left_dtype = self.left.type()
dtype_type = type(left_dtype)
additional_args = {
attr: getattr(left_dtype, attr)
for attr in dtype_type.__slots__
if attr not in {'unit', 'value_type'}
}
dtype = dtype_type(left_dtype.unit, expr.type(), **additional_args)
return rlz.shape_like(self.args, dtype=dtype)
class IntervalAdd(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.add
class IntervalSubtract(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.sub
class IntervalMultiply(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.mul
class IntervalFloorDivide(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.floordiv
class IntervalFromInteger(ValueOp):
arg = Arg(rlz.integer)
unit = Arg(
rlz.isin({'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'})
)
@property
def resolution(self):
return dt.Interval(self.unit).resolution
def output_type(self):
dtype = dt.Interval(self.unit, self.arg.type())
return rlz.shape_like(self.arg, dtype=dtype)
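# Usage sketch (illustrative comment; the table is an assumption). Interval
# values combine with temporal columns through the Add/Sub ops defined above:
#
#   >>> import ibis
#   >>> t = ibis.table([('ts', 'timestamp')], name='t')
#   >>> later = t.ts + ibis.interval(hours=3)  # TimestampAdd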
class ArrayColumn(ValueOp):
cols = Arg(rlz.list_of(rlz.column(rlz.any), min_length=1))
def _validate(self):
if len({col.type() for col in self.cols}) > 1:
raise com.IbisTypeError(
f'The types of all input columns must match exactly in a '
f'{type(self).__name__} operation.'
)
def output_type(self):
first_dtype = self.cols[0].type()
return dt.Array(first_dtype).column_type()
class ArrayLength(UnaryOp):
arg = Arg(rlz.array)
output_type = rlz.shape_like('arg', dt.int64)
class ArraySlice(ValueOp):
arg = Arg(rlz.array)
start = Arg(rlz.integer)
stop = Arg(rlz.integer, default=None)
output_type = rlz.typeof('arg')
class ArrayIndex(ValueOp):
arg = Arg(rlz.array)
index = Arg(rlz.integer)
def output_type(self):
value_dtype = self.arg.type().value_type
return rlz.shape_like(self.arg, value_dtype)
class ArrayConcat(ValueOp):
left = Arg(rlz.array)
right = Arg(rlz.array)
output_type = rlz.shape_like('left')
def _validate(self):
left_dtype, right_dtype = self.left.type(), self.right.type()
if left_dtype != right_dtype:
raise com.IbisTypeError(
'Array types must match exactly in a {} operation. '
'Left type {} != Right type {}'.format(
type(self).__name__, left_dtype, right_dtype
)
)
class ArrayRepeat(ValueOp):
arg = Arg(rlz.array)
times = Arg(rlz.integer)
output_type = rlz.typeof('arg')
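# Usage sketch (illustrative comment; the table is an assumption):
#
#   >>> import ibis
#   >>> t = ibis.table([('a', 'array<int64>')], name='t')
#   >>> n = t.a.length()     # ArrayLength
#   >>> first = t.a[0]       # ArrayIndex
#   >>> doubled = t.a + t.a  # ArrayConcat; element types must match exactly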
class ArrayCollect(Reduction):
arg = Arg(rlz.column(rlz.any))
def output_type(self):
dtype = dt.Array(self.arg.type())
return dtype.scalar_type()
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
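# Usage sketch (illustrative comment; the table is an assumption):
#
#   >>> import ibis
#   >>> t = ibis.table([('m', 'map<string, int64>')], name='t')
#   >>> v = t.m.get('a', 0)  # MapValueOrDefaultForKey
#   >>> ks = t.m.keys()      # MapKeys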
class StructField(ValueOp):
arg = Arg(rlz.struct)
field = Arg(str)
def output_type(self):
struct_dtype = self.arg.type()
value_dtype = struct_dtype[self.field]
return rlz.shape_like(self.arg, value_dtype)
class Literal(ValueOp):
value = Arg(rlz.noop)
dtype = Arg(dt.dtype)
def __repr__(self):
return '{}({})'.format(
type(self).__name__, ', '.join(map(repr, self.args))
)
def equals(self, other, cache=None):
# Check types
if not (
isinstance(other, Literal)
and isinstance(other.value, type(self.value))
and self.dtype == other.dtype
):
return False
# Check values
if isinstance(self.value, np.ndarray):
return np.array_equal(self.value, other.value)
else:
return self.value == other.value
def output_type(self):
return self.dtype.scalar_type()
def root_tables(self):
return []
def __hash__(self) -> int:
"""Return the hash of a literal value.
We override this method to make sure that we can handle things that
aren't eminently hashable like an ``array<array<int64>>``.
"""
return hash(self.dtype._literal_value_hash_key(self.value))
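# Usage sketch (illustrative comment):
#
#   >>> import ibis
#   >>> five = ibis.literal(5)  # dtype inferred as int8
#   >>> name = ibis.literal('a', type='string')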
class NullLiteral(Literal):
"""Typeless NULL literal"""
value = Arg(type(None), default=None)
dtype = Arg(dt.Null, default=dt.null)
class ScalarParameter(ValueOp):
_counter = itertools.count()
dtype = Arg(dt.dtype)
counter = Arg(int, default=lambda: next(ScalarParameter._counter))
def resolve_name(self):
return 'param_{:d}'.format(self.counter)
def __repr__(self):
return '{}(type={})'.format(type(self).__name__, self.dtype)
def __hash__(self):
return hash((self.dtype, self.counter))
def output_type(self):
return self.dtype.scalar_type()
def equals(self, other, cache=None):
return (
isinstance(other, ScalarParameter)
and self.counter == other.counter
and self.dtype.equals(other.dtype, cache=cache)
)
@property
def inputs(self):
return ()
def root_tables(self):
return []
class ExpressionList(Node):
"""Data structure for a list of arbitrary expressions"""
exprs = Arg(rlz.noop)
def __init__(self, values):
super().__init__(list(map(rlz.any, values)))
@property
def inputs(self):
return (tuple(self.exprs),)
def root_tables(self):
return distinct_roots(self.exprs)
def output_type(self):
return ir.ExprList
class ValueList(ValueOp):
"""Data structure for a list of value expressions"""
values = Arg(rlz.noop)
display_argnames = False # disable showing argnames in repr
def __init__(self, values):
super().__init__(tuple(map(rlz.any, values)))
def output_type(self):
dtype = rlz.highest_precedence_dtype(self.values)
return functools.partial(ir.ListExpr, dtype=dtype)
def root_tables(self):
return distinct_roots(*self.values)
# ----------------------------------------------------------------------
# GeoSpatial operations
class GeoSpatialBinOp(BinaryOp):
"""Geo Spatial base binary"""
left = Arg(rlz.geospatial)
right = Arg(rlz.geospatial)
class GeoSpatialUnOp(UnaryOp):
"""Geo Spatial base unary"""
arg = Arg(rlz.geospatial)
class GeoDistance(GeoSpatialBinOp):
"""Returns minimum distance between two geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoContains(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoContainsProperly(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one,
and no boundary points are shared."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCovers(GeoSpatialBinOp):
"""Returns True if no point in Geometry B is outside Geometry A"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCoveredBy(GeoSpatialBinOp):
"""Returns True if no point in Geometry/Geography A is
outside Geometry/Geography B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCrosses(GeoSpatialBinOp):
"""Returns True if the supplied geometries have some, but not all,
interior points in common."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoDisjoint(GeoSpatialBinOp):
"""Returns True if the Geometries do not “spatially intersect” -
if they do not share any space together."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoEquals(GeoSpatialBinOp):
"""Returns True if the given geometries represent the same geometry."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoGeometryN(GeoSpatialUnOp):
"""Returns the Nth Geometry of a Multi geometry."""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoGeometryType(GeoSpatialUnOp):
"""Returns the type of the geometry."""
output_type = rlz.shape_like('args', dt.string)
class GeoIntersects(GeoSpatialBinOp):
"""Returns True if the Geometries/Geography “spatially intersect in 2D”
- (share any portion of space) and False if they don’t (they are Disjoint).
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIsValid(GeoSpatialUnOp):
"""Returns true if the geometry is well-formed."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoLineLocatePoint(GeoSpatialBinOp):
"""
Locate the distance a point falls along the length of a line.
Returns a float between zero and one representing the location of the
closest point on the linestring to the given point, as a fraction of the
total 2d line length.
"""
left = Arg(rlz.linestring)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.halffloat)
class GeoLineMerge(GeoSpatialUnOp):
"""
Merge a MultiLineString into a LineString.
Returns a (set of) LineString(s) formed by sewing together the
constituent line work of a multilinestring. If a geometry other than
a linestring or multilinestring is given, this will return an empty
geometry collection.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoLineSubstring(GeoSpatialUnOp):
"""
Clip a substring from a LineString.
Returns a linestring that is a substring of the input one, starting
and ending at the given fractions of the total 2d length. The second
and third arguments are floating point values between zero and one.
This only works with linestrings.
"""
arg = Arg(rlz.linestring)
start = Arg(rlz.floating)
end = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.linestring)
class GeoOrderingEquals(GeoSpatialBinOp):
"""
Check if two geometries are equal and have the same point ordering.
Returns true if the two geometries are equal and the coordinates
are in the same order.
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoOverlaps(GeoSpatialBinOp):
"""Returns True if the Geometries share space, are of the same dimension,
but are not completely contained by each other."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoTouches(GeoSpatialBinOp):
"""Returns True if the geometries have at least one point in common,
but their interiors do not intersect."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoUnaryUnion(Reduction):
"""Returns the pointwise union of the geometries in the column."""
arg = Arg(rlz.column(rlz.geospatial))
def output_type(self):
return dt.geometry.scalar_type()
class GeoUnion(GeoSpatialBinOp):
"""Returns the pointwise union of the two geometries."""
output_type = rlz.shape_like('args', dt.geometry)
class GeoArea(GeoSpatialUnOp):
"""Area of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoPerimeter(GeoSpatialUnOp):
"""Perimeter of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoLength(GeoSpatialUnOp):
"""Length of geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoMaxDistance(GeoSpatialBinOp):
"""Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoX(GeoSpatialUnOp):
"""Return the X coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoY(GeoSpatialUnOp):
"""Return the Y coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMax(GeoSpatialUnOp):
"""Returns X maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMax(GeoSpatialUnOp):
"""Returns Y maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoStartPoint(GeoSpatialUnOp):
"""Returns the first point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoEndPoint(GeoSpatialUnOp):
"""Returns the last point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoPoint(GeoSpatialBinOp):
"""
Return a point constructed on the fly from the provided coordinate values.
Constant coordinates result in construction of a POINT literal.
"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.point)
class GeoPointN(GeoSpatialUnOp):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
"""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.point)
class GeoNPoints(GeoSpatialUnOp):
"""Return the number of points in a geometry. Works for all geometries"""
output_type = rlz.shape_like('args', dt.int64)
class GeoNRings(GeoSpatialUnOp):
"""If the geometry is a polygon or multi-polygon returns the number of
rings. It counts the outer rings as well
"""
output_type = rlz.shape_like('args', dt.int64)
class GeoSRID(GeoSpatialUnOp):
"""Returns the spatial reference identifier for the ST_Geometry."""
output_type = rlz.shape_like('args', dt.int64)
class GeoSetSRID(GeoSpatialUnOp):
"""Set the spatial reference identifier for the ST_Geometry."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoBuffer(GeoSpatialUnOp):
"""Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry.
"""
radius = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.geometry)
class GeoCentroid(GeoSpatialUnOp):
"""Returns the geometric center of a geometry."""
output_type = rlz.shape_like('arg', dt.point)
class GeoDFullyWithin(GeoSpatialBinOp):
"""Returns True if the geometries are fully within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoDWithin(GeoSpatialBinOp):
"""Returns True if the geometries are within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoEnvelope(GeoSpatialUnOp):
"""Returns a geometry representing the boundingbox of the supplied geometry.
"""
output_type = rlz.shape_like('arg', dt.polygon)
class GeoAzimuth(GeoSpatialBinOp):
"""Returns the angle in radians from the horizontal of the vector defined
by pointA and pointB. Angle is computed clockwise from down-to-up:
on the clock: 12=0; 3=PI/2; 6=PI; 9=3PI/2.
"""
left = Arg(rlz.point)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.float64)
class GeoWithin(GeoSpatialBinOp):
"""Returns True if the geometry A is completely inside geometry B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIntersection(GeoSpatialBinOp):
"""Returns a geometry that represents the point set intersection
of the Geometries.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoDifference(GeoSpatialBinOp):
"""Returns a geometry that represents that part of geometry A
that does not intersect with geometry B
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoSimplify(GeoSpatialUnOp):
"""Returns a simplified version of the given geometry."""
tolerance = Arg(rlz.floating)
preserve_collapsed = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoTransform(GeoSpatialUnOp):
"""Returns a transformed version of the given geometry into a new SRID."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoAsBinary(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
    geometry/geography without SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKB(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
    geometry/geography with SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKT(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
    geometry/geography with SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.string)
class GeoAsText(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography without SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.string)
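# Sketch (hypothetical usage) of the serialization family above: WKT/WKB
# output, with or without SRID metadata.
#
#     wkt = GeoAsText(t.geom).to_expr()    # string, no SRID
#     ewkt = GeoAsEWKT(t.geom).to_expr()   # string, with SRID
#     wkb = GeoAsBinary(t.geom).to_expr()  # binary, no SRID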
class ElementWiseVectorizedUDF(ValueOp):
"""Node for element wise UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ReductionVectorizedUDF(Reduction):
"""Node for reduction UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.scalar_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class AnalyticVectorizedUDF(AnalyticOp):
"""Node for analytics UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
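# The three vectorized-UDF nodes above share one shape and differ only in how
# the declared output type is realized: element-wise and analytic UDFs yield
# columns (``column_type()``) while reductions yield scalars
# (``scalar_type()``). A hypothetical construction sketch:
#
#     node = ElementWiseVectorizedUDF(
#         func=lambda s: s + 1,
#         args=(t.x,),
#         input_type=[dt.int64],
#         output_type=dt.int64,
#     )
#     expr = node.to_expr()  # an int64 column expression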
class ExistsSubquery(Node):
"""Helper class"""
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class NotExistsSubquery(Node):
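    """Helper class for NOT EXISTS subqueries."""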
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
|
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
            Boolean expression for this case; the result is selected when it
            evaluates to true.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
| 1,586
| 1,615
|
import collections
import functools
import itertools
import operator
from contextlib import suppress
from typing import Any, Dict, List
import numpy as np
import toolz
from cached_property import cached_property
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis import util
from ibis.expr.schema import HasSchema, Schema
from ibis.expr.signature import Annotable
from ibis.expr.signature import Argument as Arg
def _safe_repr(x, memo=None):
return x._repr(memo=memo) if isinstance(x, (ir.Expr, Node)) else repr(x)
# TODO: move to analysis
def distinct_roots(*expressions):
roots = toolz.concat(expr.op().root_tables() for expr in expressions)
return list(toolz.unique(roots))
class Node(Annotable):
__slots__ = '_expr_cached', '_hash'
def __repr__(self):
return self._repr()
def _repr(self, memo=None):
if memo is None:
from ibis.expr.format import FormatMemo
memo = FormatMemo()
opname = type(self).__name__
pprint_args = []
def _pp(x):
return _safe_repr(x, memo=memo)
for x in self.args:
if isinstance(x, (tuple, list)):
pp = repr(list(map(_pp, x)))
else:
pp = _pp(x)
pprint_args.append(pp)
return '{}({})'.format(opname, ', '.join(pprint_args))
def __getstate__(self) -> Dict[str, Any]:
"""The attributes _expr_cached and _hash are
used as caches; they can be excluded from
serialization without affecting correctness.
Excluding _expr_cached and _hash from serialization
will allow the serialized bytes to be the same for
        equivalent Node objects.
Returns
-------
Dict[str, Any]
            A dictionary storing the object's attributes.
"""
excluded_slots = {'_expr_cached', '_hash'}
return {
slot: getattr(self, slot)
for slot in self.__slots__
if slot not in excluded_slots
}
def __setstate__(self, state: Dict[str, Any]) -> None:
"""
Parameters
----------
state: Dict[str, Any]
            A dictionary storing the object's attributes.
"""
for slot in state:
setattr(self, slot, state[slot])
@property
def inputs(self):
return tuple(self.args)
def blocks(self):
        # The contents of this node are referentially distinct and may not be
        # analyzed deeper
return False
def flat_args(self):
for arg in self.args:
if not isinstance(arg, str) and isinstance(
arg, collections.abc.Iterable
):
for x in arg:
yield x
else:
yield arg
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(
(type(self),)
+ tuple(
element.op() if isinstance(element, ir.Expr) else element
for element in self.flat_args()
)
)
return self._hash
def __eq__(self, other):
return self.equals(other)
def equals(self, other, cache=None):
if cache is None:
cache = {}
key = self, other
try:
return cache[key]
except KeyError:
cache[key] = result = self is other or (
type(self) == type(other)
and all_equal(self.args, other.args, cache=cache)
)
return result
def compatible_with(self, other):
return self.equals(other)
def is_ancestor(self, other):
if isinstance(other, ir.Expr):
other = other.op()
return self.equals(other)
def to_expr(self):
if not hasattr(self, '_expr_cached'):
self._expr_cached = self._make_expr()
return self._expr_cached
def _make_expr(self):
klass = self.output_type()
return klass(self)
def output_type(self):
"""
This function must resolve the output type of the expression and return
the node wrapped in the appropriate ValueExpr type.
"""
raise NotImplementedError
class ValueOp(Node):
def root_tables(self):
exprs = [arg for arg in self.args if isinstance(arg, ir.Expr)]
return distinct_roots(*exprs)
def resolve_name(self):
raise com.ExpressionError(f'Expression is not named: {type(self)}')
def has_resolved_name(self):
return False
def all_equal(left, right, cache=None):
"""Check whether two objects `left` and `right` are equal.
Parameters
----------
left : Union[object, Expr, Node]
right : Union[object, Expr, Node]
cache : Optional[Dict[Tuple[Node, Node], bool]]
A dictionary indicating whether two Nodes are equal
"""
if cache is None:
cache = {}
if util.is_iterable(left):
# check that left and right are equal length iterables and that all
# of their elements are equal
return (
util.is_iterable(right)
and len(left) == len(right)
and all(
itertools.starmap(
functools.partial(all_equal, cache=cache), zip(left, right)
)
)
)
if hasattr(left, 'equals'):
return left.equals(right, cache=cache)
return left == right
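# ``all_equal`` compares iterables structurally, for example:
#
#     all_equal([1, [2, 3]], [1, [2, 3]])  # True: recurses into sublists
#     all_equal([1, 2], (1, 2, 3))         # False: length mismatch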
_table_names = ('unbound_table_{:d}'.format(i) for i in itertools.count())
def genname():
return next(_table_names)
class TableNode(Node):
def get_type(self, name):
return self.schema[name]
def output_type(self):
return ir.TableExpr
def aggregate(self, this, metrics, by=None, having=None):
return Aggregation(this, metrics, by=by, having=having)
def sort_by(self, expr, sort_exprs):
return Selection(expr, [], sort_keys=sort_exprs)
def is_ancestor(self, other):
import ibis.expr.lineage as lin
if isinstance(other, ir.Expr):
other = other.op()
if self.equals(other):
return True
fn = lambda e: (lin.proceed, e.op()) # noqa: E731
expr = self.to_expr()
for child in lin.traverse(fn, expr):
if child.equals(other):
return True
return False
class TableColumn(ValueOp):
"""Selects a column from a TableExpr"""
name = Arg((str, int))
table = Arg(ir.TableExpr)
def __init__(self, name, table):
schema = table.schema()
if isinstance(name, int):
name = schema.name_at_position(name)
super().__init__(name, table)
def _validate(self):
if self.name not in self.table.schema():
raise com.IbisTypeError(
"'{}' is not a field in {}".format(
self.name, self.table.columns
)
)
def parent(self):
return self.table
def resolve_name(self):
return self.name
def has_resolved_name(self):
return True
def root_tables(self):
return self.table.op().root_tables()
def _make_expr(self):
dtype = self.table._get_type(self.name)
klass = dtype.column_type()
return klass(self, name=self.name)
class RowID(ValueOp):
"""The row number (an autonumeric) of the returned result."""
def output_type(self):
return dt.int64.column_type()
def resolve_name(self):
return 'rowid'
def has_resolved_name(self):
return True
def find_all_base_tables(expr, memo=None):
if memo is None:
memo = {}
node = expr.op()
if isinstance(expr, ir.TableExpr) and node.blocks():
if expr not in memo:
memo[node] = expr
return memo
for arg in expr.op().flat_args():
if isinstance(arg, ir.Expr):
find_all_base_tables(arg, memo)
return memo
class PhysicalTable(TableNode, HasSchema):
def blocks(self):
return True
class UnboundTable(PhysicalTable):
schema = Arg(sch.Schema)
name = Arg(str, default=genname)
class DatabaseTable(PhysicalTable):
name = Arg(str)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def change_name(self, new_name):
return type(self)(new_name, self.args[1], self.source)
class SQLQueryResult(TableNode, HasSchema):
"""A table sourced from the result set of a select query"""
query = Arg(rlz.noop)
schema = Arg(sch.Schema)
source = Arg(rlz.client)
def blocks(self):
return True
class TableArrayView(ValueOp):
"""
(Temporary?) Helper operation class for SQL translation (fully formed table
subqueries to be viewed as arrays)
"""
table = Arg(ir.TableExpr)
name = Arg(str)
def __init__(self, table):
schema = table.schema()
if len(schema) > 1:
raise com.ExpressionError('Table can only have a single column')
name = schema.names[0]
        super().__init__(table, name)
def _make_expr(self):
ctype = self.table._get_type(self.name)
klass = ctype.column_type()
return klass(self, name=self.name)
class UnaryOp(ValueOp):
arg = Arg(rlz.any)
class BinaryOp(ValueOp):
"""A binary operation"""
left = Arg(rlz.any)
right = Arg(rlz.any)
class Cast(ValueOp):
arg = Arg(rlz.any)
to = Arg(dt.dtype)
# see #396 for the issue preventing this
# def resolve_name(self):
# return self.args[0].get_name()
def output_type(self):
return rlz.shape_like(self.arg, dtype=self.to)
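# Public-API sketch: ``cast`` builds a Cast node; the result keeps the input's
# shape (scalar or column) with the target dtype.
#
#     t.x.cast('float64')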
class TypeOf(UnaryOp):
output_type = rlz.shape_like('arg', dt.string)
class Negate(UnaryOp):
arg = Arg(rlz.one_of((rlz.numeric(), rlz.interval())))
output_type = rlz.typeof('arg')
class IsNull(UnaryOp):
"""Returns true if values are null
Returns
-------
isnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class NotNull(UnaryOp):
"""Returns true if values are not null
Returns
-------
notnull : boolean with dimension of caller
"""
output_type = rlz.shape_like('arg', dt.boolean)
class ZeroIfNull(UnaryOp):
output_type = rlz.typeof('arg')
class IfNull(ValueOp):
"""Equivalent to (but perhaps implemented differently):
case().when(expr.notnull(), expr)
.else_(null_substitute_expr)
"""
arg = Arg(rlz.any)
ifnull_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIf(ValueOp):
"""Set values to NULL if they equal the null_if_expr"""
arg = Arg(rlz.any)
null_if_expr = Arg(rlz.any)
output_type = rlz.shape_like('args')
class NullIfZero(ValueOp):
"""
    Set values to NULL if they equal zero. Commonly used in cases where
divide-by-zero would produce an overflow or infinity.
Equivalent to (value == 0).ifelse(ibis.NA, value)
Returns
-------
maybe_nulled : type of caller
"""
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class IsNan(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class IsInf(ValueOp):
arg = Arg(rlz.floating)
output_type = rlz.shape_like('arg', dt.boolean)
class CoalesceLike(ValueOp):
# According to Impala documentation:
# Return type: same as the initial argument value, except that integer
# values are promoted to BIGINT and floating-point values are promoted to
# DOUBLE; use CAST() when inserting into a smaller numeric column
arg = Arg(rlz.list_of(rlz.any))
def output_type(self):
first = self.arg[0]
if isinstance(first, (ir.IntegerValue, ir.FloatingValue)):
dtype = first.type().largest
else:
dtype = first.type()
# self.arg is a list of value expressions
return rlz.shape_like(self.arg, dtype)
class Coalesce(CoalesceLike):
pass
class Greatest(CoalesceLike):
pass
class Least(CoalesceLike):
pass
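# Promotion sketch per the rule above: with an integer first argument the
# result dtype is the largest type in that family.
#
#     ibis.coalesce(t.int32_col, 0)  # -> int64-typed expression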
class Abs(UnaryOp):
"""Absolute value"""
output_type = rlz.typeof('arg')
class Ceil(UnaryOp):
"""
Round up to the nearest integer value greater than or equal to this value
Returns
-------
ceiled : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Floor(UnaryOp):
"""
Round down to the nearest integer value less than or equal to this value
Returns
-------
floored : type depending on input
Decimal values: yield decimal
      Other numeric values: yield integer (int64)
"""
arg = Arg(rlz.numeric)
def output_type(self):
if isinstance(self.arg.type(), dt.Decimal):
return self.arg._factory
return rlz.shape_like(self.arg, dt.int64)
class Round(ValueOp):
arg = Arg(rlz.numeric)
digits = Arg(rlz.numeric, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
return self.arg._factory
elif self.digits is None:
return rlz.shape_like(self.arg, dt.int64)
else:
return rlz.shape_like(self.arg, dt.double)
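# Output-type sketch for Round, matching the branches above:
#
#     t.x.round()     # digits omitted  -> integer (int64)
#     t.x.round(2)    # digits given    -> floating (double)
#     t.dec.round(2)  # decimal input   -> stays decimal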
class Clip(ValueOp):
arg = Arg(rlz.strict_numeric)
lower = Arg(rlz.strict_numeric, default=None)
upper = Arg(rlz.strict_numeric, default=None)
output_type = rlz.typeof('arg')
class BaseConvert(ValueOp):
arg = Arg(rlz.one_of([rlz.integer, rlz.string]))
from_base = Arg(rlz.integer)
to_base = Arg(rlz.integer)
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
class MathUnaryOp(UnaryOp):
arg = Arg(rlz.numeric)
def output_type(self):
arg = self.arg
if isinstance(self.arg, ir.DecimalValue):
dtype = arg.type()
else:
dtype = dt.double
return rlz.shape_like(arg, dtype)
class ExpandingTypeMathUnaryOp(MathUnaryOp):
def output_type(self):
if not isinstance(self.arg, ir.DecimalValue):
return super().output_type()
arg = self.arg
return rlz.shape_like(arg, arg.type().largest)
class Exp(ExpandingTypeMathUnaryOp):
pass
class Sign(UnaryOp):
arg = Arg(rlz.numeric)
output_type = rlz.typeof('arg')
class Sqrt(MathUnaryOp):
pass
class Logarithm(MathUnaryOp):
arg = Arg(rlz.strict_numeric)
class Log(Logarithm):
arg = Arg(rlz.strict_numeric)
base = Arg(rlz.strict_numeric, default=None)
class Ln(Logarithm):
"""Natural logarithm"""
class Log2(Logarithm):
"""Logarithm base 2"""
class Log10(Logarithm):
"""Logarithm base 10"""
class Degrees(ExpandingTypeMathUnaryOp):
"""Converts radians to degrees"""
arg = Arg(rlz.numeric)
class Radians(MathUnaryOp):
"""Converts degrees to radians"""
arg = Arg(rlz.numeric)
# TRIGONOMETRIC OPERATIONS
class TrigonometricUnary(MathUnaryOp):
"""Trigonometric base unary"""
arg = Arg(rlz.numeric)
class TrigonometricBinary(BinaryOp):
"""Trigonometric base binary"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.float64)
class Acos(TrigonometricUnary):
"""Returns the arc cosine of x"""
class Asin(TrigonometricUnary):
"""Returns the arc sine of x"""
class Atan(TrigonometricUnary):
"""Returns the arc tangent of x"""
class Atan2(TrigonometricBinary):
"""Returns the arc tangent of x and y"""
class Cos(TrigonometricUnary):
"""Returns the cosine of x"""
class Cot(TrigonometricUnary):
"""Returns the cotangent of x"""
class Sin(TrigonometricUnary):
"""Returns the sine of x"""
class Tan(TrigonometricUnary):
"""Returns the tangent of x"""
class StringUnaryOp(UnaryOp):
arg = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class Uppercase(StringUnaryOp):
"""Convert string to all uppercase"""
class Lowercase(StringUnaryOp):
"""Convert string to all lowercase"""
class Reverse(StringUnaryOp):
"""Reverse string"""
class Strip(StringUnaryOp):
"""Remove whitespace from left and right sides of string"""
class LStrip(StringUnaryOp):
"""Remove whitespace from left side of string"""
class RStrip(StringUnaryOp):
"""Remove whitespace from right side of string"""
class Capitalize(StringUnaryOp):
"""Return a capitalized version of input string"""
class Substring(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.integer)
length = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StrRight(ValueOp):
arg = Arg(rlz.string)
nchars = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class Repeat(ValueOp):
arg = Arg(rlz.string)
times = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class StringFind(ValueOp):
arg = Arg(rlz.string)
substr = Arg(rlz.string)
start = Arg(rlz.integer, default=None)
end = Arg(rlz.integer, default=None)
output_type = rlz.shape_like('arg', dt.int64)
class Translate(ValueOp):
arg = Arg(rlz.string)
from_str = Arg(rlz.string)
to_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class LPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class RPad(ValueOp):
arg = Arg(rlz.string)
length = Arg(rlz.integer)
pad = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class FindInSet(ValueOp):
needle = Arg(rlz.string)
values = Arg(rlz.list_of(rlz.string, min_length=1))
output_type = rlz.shape_like('needle', dt.int64)
class StringJoin(ValueOp):
sep = Arg(rlz.string)
arg = Arg(rlz.list_of(rlz.string, min_length=1))
def output_type(self):
return rlz.shape_like(tuple(self.flat_args()), dt.string)
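# Public-API sketch: the separator is the receiver, as with Python's
# ``str.join``.
#
#     ibis.literal(',').join(['a', 'b', 'c'])  # -> 'a,b,c' as a string expr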
class StartsWith(ValueOp):
arg = Arg(rlz.string)
start = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class EndsWith(ValueOp):
arg = Arg(rlz.string)
end = Arg(rlz.string)
output_type = rlz.shape_like("arg", dt.boolean)
class BooleanValueOp:
pass
class FuzzySearch(ValueOp, BooleanValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.boolean)
class StringSQLLike(FuzzySearch):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
escape = Arg(str, default=None)
class StringSQLILike(StringSQLLike):
"""SQL ilike operation"""
class RegexSearch(FuzzySearch):
pass
class RegexExtract(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
index = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.string)
class RegexReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringReplace(ValueOp):
arg = Arg(rlz.string)
pattern = Arg(rlz.string)
replacement = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringSplit(ValueOp):
arg = Arg(rlz.string)
delimiter = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.Array(dt.string))
class StringConcat(ValueOp):
arg = Arg(rlz.list_of(rlz.string))
output_type = rlz.shape_like('arg', dt.string)
class ParseURL(ValueOp):
arg = Arg(rlz.string)
extract = Arg(
rlz.isin(
{
'PROTOCOL',
'HOST',
'PATH',
'REF',
'AUTHORITY',
'FILE',
'USERINFO',
'QUERY',
}
)
)
key = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.string)
class StringLength(UnaryOp):
"""
Compute length of strings
Returns
-------
length : int32
"""
output_type = rlz.shape_like('arg', dt.int32)
class StringAscii(UnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
# ----------------------------------------------------------------------
class Reduction(ValueOp):
_reduction = True
class Count(Reduction):
arg = Arg((ir.ColumnExpr, ir.TableExpr))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class Arbitrary(Reduction):
arg = Arg(rlz.column(rlz.any))
how = Arg(rlz.isin({'first', 'last', 'heavy'}), default=None)
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitAnd(Reduction):
"""Aggregate bitwise AND operation.
All elements in an integer column are ANDed together. This can be used
to determine which bit flags are set on all elements.
Resources:
* `BigQuery BIT_AND
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_and>`_
* `MySQL BIT_AND
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-and>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitOr(Reduction):
"""Aggregate bitwise OR operation.
All elements in an integer column are ORed together. This can be used
to determine which bit flags are set on any element.
Resources:
* `BigQuery BIT_OR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_or>`_
* `MySQL BIT_OR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-or>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class BitXor(Reduction):
"""Aggregate bitwise XOR operation.
All elements in an integer column are XORed together. This can be used
as a parity checksum of element values.
Resources:
* `BigQuery BIT_XOR
<https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate_functions#bit_xor>`_
* `MySQL BIT_XOR
<https://dev.mysql.com/doc/refman/5.7/en/aggregate-functions.html#function_bit-xor>`_
"""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Sum(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.scalar_type()
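# Output-type sketch for Sum, matching the branches above:
#
#     t.flag.sum()  # boolean column -> int64 (count of True values)
#     t.i32.sum()   # int32 column   -> int64 (the largest integer type)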
class Mean(Reduction):
arg = Arg(rlz.column(rlz.numeric))
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type()
else:
dtype = dt.float64
return dtype.scalar_type()
class Quantile(Reduction):
arg = Arg(rlz.any)
quantile = Arg(rlz.strict_numeric)
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.float64.scalar_type()
class MultiQuantile(Quantile):
arg = Arg(rlz.any)
quantile = Arg(rlz.value(dt.Array(dt.float64)))
interpolation = Arg(
rlz.isin({'linear', 'lower', 'higher', 'midpoint', 'nearest'}),
default='linear',
)
def output_type(self):
return dt.Array(dt.float64).scalar_type()
class VarianceBase(Reduction):
arg = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.scalar_type()
class StandardDev(VarianceBase):
pass
class Variance(VarianceBase):
pass
class Correlation(Reduction):
"""Coefficient of correlation of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Covariance(Reduction):
"""Covariance of a set of number pairs."""
left = Arg(rlz.column(rlz.numeric))
right = Arg(rlz.column(rlz.numeric))
how = Arg(rlz.isin({'sample', 'pop'}), default=None)
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.float64.scalar_type()
class Max(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Min(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class HLLCardinality(Reduction):
"""Approximate number of unique values using HyperLogLog algorithm.
Impala offers the NDV built-in function for this.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
# Impala 2.0 and higher returns a DOUBLE
# return ir.DoubleScalar
return functools.partial(ir.IntegerScalar, dtype=dt.int64)
class GroupConcat(Reduction):
arg = Arg(rlz.column(rlz.any))
sep = Arg(rlz.string, default=',')
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.string.scalar_type()
class CMSMedian(Reduction):
"""
Compute the approximate median of a set of comparable values using the
Count-Min-Sketch algorithm. Exposed in Impala using APPX_MEDIAN.
"""
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
# ----------------------------------------------------------------------
# Analytic functions
class AnalyticOp(ValueOp):
pass
class WindowOp(ValueOp):
expr = Arg(rlz.noop)
window = Arg(rlz.noop)
output_type = rlz.array_like('expr')
display_argnames = False
def __init__(self, expr, window):
from ibis.expr.analysis import is_analytic
from ibis.expr.window import propagate_down_window
if not is_analytic(expr):
raise com.IbisInputError(
'Expression does not contain a valid window operation'
)
table = ir.find_base_table(expr)
if table is not None:
window = window.bind(table)
if window.max_lookback is not None:
error_msg = (
"'max lookback' windows must be ordered "
"by a timestamp column"
)
if len(window._order_by) != 1:
raise com.IbisInputError(error_msg)
order_var = window._order_by[0].op().args[0]
if not isinstance(order_var.type(), dt.Timestamp):
raise com.IbisInputError(error_msg)
expr = propagate_down_window(expr, window)
super().__init__(expr, window)
def over(self, window):
new_window = self.window.combine(window)
return WindowOp(self.expr, new_window)
@property
def inputs(self):
return self.expr.op().inputs[0], self.window
def root_tables(self):
return distinct_roots(
self.expr, *self.window._order_by, *self.window._group_by
)
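# Public-API sketch: analytic expressions are wrapped in a WindowOp via
# ``over``; combining an existing window with a new one goes through
# ``Window.combine`` as above.
#
#     w = ibis.window(group_by=t.g, order_by=t.ts)
#     expr = t.x.sum().over(w)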
class ShiftBase(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
offset = Arg(rlz.one_of((rlz.integer, rlz.interval)), default=None)
default = Arg(rlz.any, default=None)
output_type = rlz.typeof('arg')
class Lag(ShiftBase):
pass
class Lead(ShiftBase):
pass
class RankBase(AnalyticOp):
def output_type(self):
return dt.int64.column_type()
class MinRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order.
Examples
--------
values ranks
1 0
1 0
2 2
2 2
2 2
3 5
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL RANK()
arg = Arg(rlz.column(rlz.any))
class DenseRank(RankBase):
"""
Compute position of first element within each equal-value group in sorted
order, ignoring duplicate values.
Examples
--------
values ranks
1 0
1 0
2 1
2 1
2 1
3 2
Returns
-------
ranks : Int64Column, starting from 0
"""
# Equivalent to SQL DENSE_RANK()
arg = Arg(rlz.column(rlz.any))
class RowNumber(RankBase):
"""
Compute row number starting from 0 after sorting by column expression
Examples
--------
>>> import ibis
>>> t = ibis.table([('values', dt.int64)])
>>> w = ibis.window(order_by=t.values)
>>> row_num = ibis.row_number().over(w)
>>> result = t[t.values, row_num.name('row_num')]
Returns
-------
row_number : Int64Column, starting from 0
"""
# Equivalent to SQL ROW_NUMBER()
class CumulativeOp(AnalyticOp):
pass
class CumulativeSum(CumulativeOp):
"""Cumulative sum. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.BooleanValue):
dtype = dt.int64
else:
dtype = self.arg.type().largest
return dtype.column_type()
class CumulativeMean(CumulativeOp):
"""Cumulative mean. Requires an order window."""
arg = Arg(rlz.column(rlz.numeric))
def output_type(self):
if isinstance(self.arg, ir.DecimalValue):
dtype = self.arg.type().largest
else:
dtype = dt.float64
return dtype.column_type()
class CumulativeMax(CumulativeOp):
"""Cumulative max. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class CumulativeMin(CumulativeOp):
"""Cumulative min. Requires an order window."""
arg = Arg(rlz.column(rlz.any))
output_type = rlz.array_like('arg')
class PercentRank(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.shape_like('arg', dt.double)
class NTile(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
buckets = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.int64)
class FirstValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class LastValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
output_type = rlz.typeof('arg')
class NthValue(AnalyticOp):
arg = Arg(rlz.column(rlz.any))
nth = Arg(rlz.integer)
output_type = rlz.typeof('arg')
# ----------------------------------------------------------------------
# Distinct stuff
class Distinct(TableNode, HasSchema):
"""
Distinct is a table-level unique-ing operation.
In SQL, you might have:
SELECT DISTINCT foo
FROM table
SELECT DISTINCT foo, bar
FROM table
"""
table = Arg(ir.TableExpr)
def _validate(self):
# check whether schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.table.schema()
def blocks(self):
return True
class DistinctColumn(ValueOp):
"""
    COUNT(DISTINCT ...) is really just syntactic sugar, but we provide a
    distinct().count() nicety for users nonetheless.
    For all intents and purposes, like Distinct, but can be distinguished later
    for evaluation if the result should be array-like versus table-like. Also
    used for calling count().
"""
arg = Arg(rlz.noop)
output_type = rlz.typeof('arg')
def count(self):
"""Only valid if the distinct contains a single column"""
return CountDistinct(self.arg)
class CountDistinct(Reduction):
arg = Arg(rlz.column(rlz.any))
where = Arg(rlz.boolean, default=None)
def output_type(self):
return dt.int64.scalar_type()
# ---------------------------------------------------------------------
# Boolean reductions and semi/anti join support
class Any(ValueOp):
# Depending on the kind of input boolean array, the result might either be
# array-like (an existence-type predicate) or scalar (a reduction)
arg = Arg(rlz.column(rlz.boolean))
@property
def _reduction(self):
roots = self.arg.op().root_tables()
return len(roots) < 2
def output_type(self):
if self._reduction:
return dt.boolean.scalar_type()
else:
return dt.boolean.column_type()
def negate(self):
return NotAny(self.arg)
class All(ValueOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.scalar_like('arg')
_reduction = True
def negate(self):
return NotAll(self.arg)
class NotAny(Any):
def negate(self):
return Any(self.arg)
class NotAll(All):
def negate(self):
return All(self.arg)
class CumulativeAny(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
class CumulativeAll(CumulativeOp):
arg = Arg(rlz.column(rlz.boolean))
output_type = rlz.typeof('arg')
# ---------------------------------------------------------------------
class TypedCaseBuilder:
__slots__ = ()
def type(self):
types = [result.type() for result in self.results]
return dt.highest_precedence(types)
def else_(self, result_expr):
"""
        Specify the default result returned when no other case matches.
Returns
-------
builder : CaseBuilder
"""
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = ir.as_value_expr(result_expr)
kwargs['default'] = result_expr
# Maintain immutability
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
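# Builder-chain sketch: ``end`` assembles the accumulated cases into the case
# operation (``ibis.case()`` produces the SearchedCaseBuilder defined below).
#
#     expr = (ibis.case()
#             .when(t.x > 0, 'pos')
#             .when(t.x < 0, 'neg')
#             .else_('zero')
#             .end())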
class SimpleCase(ValueOp):
base = Arg(rlz.any)
cases = Arg(rlz.list_of(rlz.any))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
return distinct_roots(
*itertools.chain(
[self.base],
self.cases,
self.results,
[] if self.default is None else [self.default],
)
)
def output_type(self):
exprs = self.results + [self.default]
return rlz.shape_like(self.base, dtype=exprs.type())
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
                'Base expression and passed case are not comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(self.base, cases, results, self.default)
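# Public-API sketch for the simple (base-comparison) form of case:
#
#     t.x.case().when(1, 'one').when(2, 'two').else_('other').end()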
class SearchedCase(ValueOp):
cases = Arg(rlz.list_of(rlz.boolean))
results = Arg(rlz.list_of(rlz.any))
default = Arg(rlz.any)
def _validate(self):
assert len(self.cases) == len(self.results)
def root_tables(self):
cases, results, default = self.args
return distinct_roots(
*itertools.chain(
cases.values,
results.values,
[] if default is None else [default],
)
)
def output_type(self):
exprs = self.results + [self.default]
dtype = rlz.highest_precedence_dtype(exprs)
return rlz.shape_like(self.cases, dtype)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
            Boolean expression for this case; the result is selected when it
            evaluates to true.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = ir.as_value_expr(case_expr)
result_expr = ir.as_value_expr(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
class Where(ValueOp):
"""
Ternary case expression, equivalent to
bool_expr.case()
.when(True, true_expr)
.else_(false_or_null_expr)
"""
bool_expr = Arg(rlz.boolean)
true_expr = Arg(rlz.any)
false_null_expr = Arg(rlz.any)
def output_type(self):
return rlz.shape_like(self.bool_expr, self.true_expr.type())
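# Public-API sketch: ``ifelse`` on a boolean expression builds a Where node.
#
#     (t.x > 0).ifelse(t.x, 0)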
def _validate_join_tables(left, right):
if not isinstance(left, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'left table'.format(type(left).__name__)
)
if not isinstance(right, ir.TableExpr):
raise TypeError(
'Can only join table expressions, got {} for '
'right table'.format(type(right).__name__)
)
def _make_distinct_join_predicates(left, right, predicates):
# see GH #667
# If left and right table have a common parent expression (e.g. they
# have different filters), must add a self-reference and make the
# appropriate substitution in the join predicates
if left.equals(right):
right = right.view()
predicates = _clean_join_predicates(left, right, predicates)
return left, right, predicates
def _clean_join_predicates(left, right, predicates):
import ibis.expr.analysis as L
result = []
if not isinstance(predicates, (list, tuple)):
predicates = [predicates]
for pred in predicates:
if isinstance(pred, tuple):
if len(pred) != 2:
                raise com.ExpressionError('Join key tuple must be length 2')
lk, rk = pred
lk = left._ensure_expr(lk)
rk = right._ensure_expr(rk)
pred = lk == rk
elif isinstance(pred, str):
pred = left[pred] == right[pred]
elif not isinstance(pred, ir.Expr):
raise NotImplementedError
if not isinstance(pred, ir.BooleanColumn):
raise com.ExpressionError('Join predicate must be comparison')
preds = L.flatten_predicate(pred)
result.extend(preds)
_validate_join_predicates(left, right, result)
return result
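# Predicate forms accepted by the cleaning logic above (usage sketch):
#
#     t.join(s, [('a', 'b')])   # tuple: t.a == s.b
#     t.join(s, ['key'])        # string: t.key == s.key
#     t.join(s, [t.a == s.b])   # explicit boolean column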
def _validate_join_predicates(left, right, predicates):
from ibis.expr.analysis import fully_originate_from
# Validate join predicates. Each predicate must be valid jointly when
# considering the roots of each input table
for predicate in predicates:
if not fully_originate_from(predicate, [left, right]):
raise com.RelationError(
'The expression {!r} does not fully '
'originate from dependencies of the table '
'expression.'.format(predicate)
)
class Join(TableNode):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def __init__(self, left, right, predicates):
_validate_join_tables(left, right)
left, right, predicates = _make_distinct_join_predicates(
left, right, predicates
)
super().__init__(left, right, predicates)
def _get_schema(self):
# For joins retaining both table schemas, merge them together here
left = self.left
right = self.right
if not left._is_materialized():
left = left.materialize()
if not right._is_materialized():
right = right.materialize()
sleft = left.schema()
sright = right.schema()
overlap = set(sleft.names) & set(sright.names)
if overlap:
raise com.RelationError(
'Joined tables have overlapping names: %s' % str(list(overlap))
)
return sleft.append(sright)
def has_schema(self):
return False
def root_tables(self):
if util.all_of([self.left.op(), self.right.op()], (Join, Selection)):
# Unraveling is not possible
return [self.left.op(), self.right.op()]
else:
return distinct_roots(self.left, self.right)
class InnerJoin(Join):
pass
class LeftJoin(Join):
pass
class RightJoin(Join):
pass
class OuterJoin(Join):
pass
class AnyInnerJoin(Join):
pass
class AnyLeftJoin(Join):
pass
class LeftSemiJoin(Join):
def _get_schema(self):
return self.left.schema()
class LeftAntiJoin(Join):
def _get_schema(self):
return self.left.schema()
class MaterializedJoin(TableNode, HasSchema):
join = Arg(ir.TableExpr)
def _validate(self):
assert isinstance(self.join.op(), Join)
# check whether the underlying schema has overlapping columns or not
assert self.schema
@cached_property
def schema(self):
return self.join.op()._get_schema()
def root_tables(self):
return self.join.op().root_tables()
def blocks(self):
return True
class CrossJoin(InnerJoin):
"""
    Some databases have a CROSS JOIN operator that may be preferable to an
    INNER JOIN with no predicates.
"""
def __init__(self, *args, **kwargs):
if 'prefixes' in kwargs:
raise NotImplementedError
if len(args) < 2:
raise com.IbisInputError('Must pass at least 2 tables')
left = args[0]
right = args[1]
for t in args[2:]:
right = right.cross_join(t)
InnerJoin.__init__(self, left, right, [])
class AsOfJoin(Join):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
predicates = Arg(rlz.noop)
by = Arg(rlz.noop, default=None)
tolerance = Arg(rlz.interval(), default=None)
def __init__(self, left, right, predicates, by, tolerance):
super().__init__(left, right, predicates)
self.by = _clean_join_predicates(self.left, self.right, by)
self.tolerance = tolerance
self._validate_args(['by', 'tolerance'])
def _validate_args(self, args: List[str]):
for arg in args:
argument = self.signature[arg]
value = argument.validate(getattr(self, arg))
setattr(self, arg, value)
class SetOp(TableNode, HasSchema):
left = Arg(rlz.noop)
right = Arg(rlz.noop)
def _validate(self):
if not self.left.schema().equals(self.right.schema()):
raise com.RelationError(
'Table schemas must be equal for set operations'
)
@cached_property
def schema(self):
return self.left.schema()
def blocks(self):
return True
class Union(SetOp):
distinct = Arg(rlz.validator(bool), default=False)
class Intersection(SetOp):
pass
class Difference(SetOp):
pass
class Limit(TableNode):
table = Arg(ir.TableExpr)
n = Arg(rlz.validator(int))
offset = Arg(rlz.validator(int))
def blocks(self):
return True
@property
def schema(self):
return self.table.schema()
def has_schema(self):
return self.table.op().has_schema()
def root_tables(self):
return [self]
# --------------------------------------------------------------------
# Sorting
def to_sort_key(table, key):
if isinstance(key, DeferredSortKey):
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return to_sort_key(table, key)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
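# Sort-key forms accepted above (usage sketch):
#
#     t.sort_by('a')              # ascending by column name
#     t.sort_by([('a', False)])   # (key, ascending) tuple
#     t.sort_by([('a', 'desc')])  # 'desc'/'descending' strings also work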
class SortKey(Node):
expr = Arg(rlz.column(rlz.any))
ascending = Arg(rlz.validator(bool), default=True)
def __repr__(self):
# Temporary
rows = [
'Sort key:',
' ascending: {0!s}'.format(self.ascending),
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
# TODO: might generalize this equals based on fields
# requires a proxy class with equals for non expr values
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
class SelfReference(TableNode, HasSchema):
table = Arg(ir.TableExpr)
@cached_property
def schema(self):
return self.table.schema()
def root_tables(self):
# The dependencies of this operation are not walked, which makes the
# table expression holding this relationally distinct from other
# expressions, so things like self-joins are possible
return [self]
def blocks(self):
return True
class Selection(TableNode, HasSchema):
table = Arg(ir.TableExpr)
selections = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self, table, selections=None, predicates=None, sort_keys=None
):
import ibis.expr.analysis as L
# Argument cleaning
selections = util.promote_list(
selections if selections is not None else []
)
projections = []
for selection in selections:
if isinstance(selection, str):
projection = table[selection]
else:
projection = selection
projections.append(projection)
sort_keys = [
to_sort_key(table, k)
for k in util.promote_list(
sort_keys if sort_keys is not None else []
)
]
predicates = list(
toolz.concat(
map(
L.flatten_predicate,
predicates if predicates is not None else [],
)
)
)
super().__init__(
table=table,
selections=projections,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator
# Need to validate that the column expressions are compatible with the
# input table; this means they must either be scalar expressions or
# array expressions originating from the same root table expression
dependent_exprs = self.selections + self.sort_keys
self.table._assert_valid(dependent_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate no overlapping columns in schema
assert self.schema
@cached_property
def schema(self):
# Resolve schema and initialize
if not self.selections:
return self.table.schema()
types = []
names = []
for projection in self.selections:
if isinstance(projection, ir.DestructColumn):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = projection.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
elif isinstance(projection, ir.ValueExpr):
names.append(projection.get_name())
types.append(projection.type())
elif isinstance(projection, ir.TableExpr):
schema = projection.schema()
names.extend(schema.names)
types.extend(schema.types)
return Schema(names, types)
def blocks(self):
return bool(self.selections)
def substitute_table(self, table_expr):
return Selection(table_expr, self.selections)
def root_tables(self):
return [self]
def can_add_filters(self, wrapped_expr, predicates):
pass
@staticmethod
def empty_or_equal(lefts, rights):
return not lefts or not rights or all_equal(lefts, rights)
def compatible_with(self, other):
# self and other are equivalent except for predicates, selections, or
# sort keys any of which is allowed to be empty. If both are not empty
# then they must be equal
if self.equals(other):
return True
if not isinstance(other, type(self)):
return False
return self.table.equals(other.table) and (
self.empty_or_equal(self.predicates, other.predicates)
and self.empty_or_equal(self.selections, other.selections)
and self.empty_or_equal(self.sort_keys, other.sort_keys)
)
# Operator combination / fusion logic
def aggregate(self, this, metrics, by=None, having=None):
if len(self.selections) > 0:
return Aggregation(this, metrics, by=by, having=having)
else:
helper = AggregateSelection(this, metrics, by, having)
return helper.get_result()
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
if not self.blocks():
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Selection(
self.table,
self.selections,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class AggregateSelection:
# sort keys cannot be discarded because of order-dependent
# aggregate functions like GROUP_CONCAT
def __init__(self, parent, metrics, by, having):
self.parent = parent
self.op = parent.op()
self.metrics = metrics
self.by = by
self.having = having
def get_result(self):
if self.op.blocks():
return self._plain_subquery()
else:
return self._attempt_pushdown()
def _plain_subquery(self):
return Aggregation(
self.parent, self.metrics, by=self.by, having=self.having
)
def _attempt_pushdown(self):
metrics_valid, lowered_metrics = self._pushdown_exprs(self.metrics)
by_valid, lowered_by = self._pushdown_exprs(self.by)
having_valid, lowered_having = self._pushdown_exprs(
self.having or None
)
if metrics_valid and by_valid and having_valid:
return Aggregation(
self.op.table,
lowered_metrics,
by=lowered_by,
having=lowered_having,
predicates=self.op.predicates,
sort_keys=self.op.sort_keys,
)
else:
return self._plain_subquery()
def _pushdown_exprs(self, exprs):
import ibis.expr.analysis as L
if exprs is None:
return True, []
resolved = self.op.table._resolve(exprs)
subbed_exprs = []
valid = False
if resolved:
for x in util.promote_list(resolved):
subbed = L.sub_for(x, [(self.parent, self.op.table)])
subbed_exprs.append(subbed)
valid = self.op.table._is_valid(subbed_exprs)
else:
valid = False
return valid, subbed_exprs
def _maybe_convert_sort_keys(table, exprs):
try:
return [to_sort_key(table, k) for k in util.promote_list(exprs)]
except com.IbisError:
return None
class Aggregation(TableNode, HasSchema):
"""
metrics : per-group scalar aggregates
by : group expressions
having : post-aggregation predicate
TODO: not putting this in the aggregate operation yet
where : pre-aggregation predicate
"""
table = Arg(ir.TableExpr)
metrics = Arg(rlz.noop)
by = Arg(rlz.noop)
having = Arg(rlz.noop, default=None)
predicates = Arg(rlz.noop, default=None)
sort_keys = Arg(rlz.noop, default=None)
def __init__(
self,
table,
metrics,
by=None,
having=None,
predicates=None,
sort_keys=None,
):
# For tables, like joins, that are not materialized
metrics = self._rewrite_exprs(table, metrics)
by = [] if by is None else by
by = table._resolve(by)
having = [] if having is None else having
predicates = [] if predicates is None else predicates
# order by only makes sense with group by in an aggregation
sort_keys = [] if not by or sort_keys is None else sort_keys
sort_keys = [
to_sort_key(table, k) for k in util.promote_list(sort_keys)
]
by = self._rewrite_exprs(table, by)
having = self._rewrite_exprs(table, having)
predicates = self._rewrite_exprs(table, predicates)
sort_keys = self._rewrite_exprs(table, sort_keys)
super().__init__(
table=table,
metrics=metrics,
by=by,
having=having,
predicates=predicates,
sort_keys=sort_keys,
)
def _validate(self):
from ibis.expr.analysis import FilterValidator, is_reduction
# All aggregates are valid
for expr in self.metrics:
if not isinstance(expr, ir.ScalarExpr) or not is_reduction(expr):
raise TypeError(
'Passed a non-aggregate expression: %s' % _safe_repr(expr)
)
for expr in self.having:
if not isinstance(expr, ir.BooleanScalar):
raise com.ExpressionError(
'Having clause must be boolean '
'expression, was: {0!s}'.format(_safe_repr(expr))
)
# All non-scalar refs originate from the input table
all_exprs = self.metrics + self.by + self.having + self.sort_keys
self.table._assert_valid(all_exprs)
# Validate predicates
validator = FilterValidator([self.table])
validator.validate_all(self.predicates)
# Validate schema has no overlapping columns
assert self.schema
def _rewrite_exprs(self, table, what):
what = util.promote_list(what)
all_exprs = []
for expr in what:
if isinstance(expr, ir.ExprList):
all_exprs.extend(expr.exprs())
else:
bound_expr = ir.bind_expr(table, expr)
all_exprs.append(bound_expr)
return all_exprs
# TODO - #2832
# this optimization becomes O(n^2) when it calls into
# _lift_TableColumn in analysis.py, which itself is O(n) and is
# called on each input to the aggregation - thus creating the
# aggregation expression can be extremely slow on wide tables
# that contain a Selection.
# return [
# substitute_parents(x, past_projection=False) for x in all_exprs
# ]
def blocks(self):
return True
def substitute_table(self, table_expr):
return Aggregation(
table_expr, self.metrics, by=self.by, having=self.having
)
@cached_property
def schema(self):
names = []
types = []
for e in self.by + self.metrics:
if isinstance(e, ir.DestructValue):
# If this is a destruct, then we destructure
# the result and assign to multiple columns
struct_type = e.type()
for name in struct_type.names:
names.append(name)
types.append(struct_type[name])
else:
names.append(e.get_name())
types.append(e.type())
return Schema(names, types)
def sort_by(self, expr, sort_exprs):
sort_exprs = util.promote_list(sort_exprs)
resolved_keys = _maybe_convert_sort_keys(self.table, sort_exprs)
if resolved_keys and self.table._is_valid(resolved_keys):
return Aggregation(
self.table,
self.metrics,
by=self.by,
having=self.having,
predicates=self.predicates,
sort_keys=self.sort_keys + resolved_keys,
)
return Selection(expr, [], sort_keys=sort_exprs)
class NumericBinaryOp(BinaryOp):
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
class Add(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.add)
class Multiply(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mul)
class Power(NumericBinaryOp):
def output_type(self):
if util.all_of(self.args, ir.IntegerValue):
return rlz.shape_like(self.args, dt.float64)
else:
return rlz.shape_like(self.args)
class Subtract(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.sub)
class Divide(NumericBinaryOp):
output_type = rlz.shape_like('args', dt.float64)
class FloorDivide(Divide):
output_type = rlz.shape_like('args', dt.int64)
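# Output-type sketch, matching the definitions above:
#
#     t.i / t.j    # Divide      -> float64, regardless of input types
#     t.i // t.j   # FloorDivide -> int64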
class LogicalBinaryOp(BinaryOp):
left = Arg(rlz.boolean)
right = Arg(rlz.boolean)
output_type = rlz.shape_like('args', dt.boolean)
class Not(UnaryOp):
arg = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.boolean)
class Modulus(NumericBinaryOp):
output_type = rlz.numeric_like('args', operator.mod)
class And(LogicalBinaryOp):
pass
class Or(LogicalBinaryOp):
pass
class Xor(LogicalBinaryOp):
pass
class Comparison(BinaryOp, BooleanValueOp):
left = Arg(rlz.any)
right = Arg(rlz.any)
def __init__(self, left, right):
"""
Casting rules for type promotions (for resolving the output type) may
depend in some cases on the target backend.
TODO: how will overflows be handled? Can we provide anything useful in
Ibis to help the user avoid them?
:param left:
:param right:
"""
super().__init__(*self._maybe_cast_args(left, right))
def _maybe_cast_args(self, left, right):
# it might not be necessary?
with suppress(com.IbisTypeError):
return left, rlz.cast(right, left)
with suppress(com.IbisTypeError):
return rlz.cast(left, right), right
return left, right
def output_type(self):
if not rlz.comparable(self.left, self.right):
raise TypeError(
'Arguments with datatype {} and {} are '
'not comparable'.format(self.left.type(), self.right.type())
)
return rlz.shape_like(self.args, dt.boolean)
class Equals(Comparison):
pass
class NotEquals(Comparison):
pass
class GreaterEqual(Comparison):
pass
class Greater(Comparison):
pass
class LessEqual(Comparison):
pass
class Less(Comparison):
pass
class IdenticalTo(Comparison):
pass
class Between(ValueOp, BooleanValueOp):
arg = Arg(rlz.any)
lower_bound = Arg(rlz.any)
upper_bound = Arg(rlz.any)
def output_type(self):
arg, lower, upper = self.args
if not (rlz.comparable(arg, lower) and rlz.comparable(arg, upper)):
raise TypeError('Arguments are not comparable')
return rlz.shape_like(self.args, dt.boolean)
class BetweenTime(Between):
arg = Arg(rlz.one_of([rlz.timestamp, rlz.time]))
lower_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
upper_bound = Arg(rlz.one_of([rlz.time, rlz.string]))
class Contains(ValueOp, BooleanValueOp):
value = Arg(rlz.any)
options = Arg(
rlz.one_of(
[
rlz.list_of(rlz.any),
rlz.set_,
rlz.column(rlz.any),
rlz.array_of(rlz.any),
]
)
)
def __init__(self, value, options):
# it can be a single expression, like a column
if not isinstance(options, ir.Expr):
if util.any_of(options, ir.Expr):
# or a list of expressions
options = ir.sequence(options)
else:
# or a set of scalar values
options = frozenset(options)
super().__init__(value, options)
def output_type(self):
all_args = [self.value]
if isinstance(self.options, ir.ListExpr):
all_args += self.options
else:
all_args += [self.options]
return rlz.shape_like(all_args, dt.boolean)
class NotContains(Contains):
pass
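# Public-API sketch for Contains/NotContains:
#
#     t.x.isin([1, 2, 3])   # against scalar values (a frozenset internally)
#     t.x.isin(s.y)         # against another column expression
#     t.x.notin([1, 2, 3])  # negated membership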
class ReplaceValues(ValueOp):
"""
Apply a multi-value replacement on a particular column. As an example from
    SQL, given DAYOFWEEK(timestamp_col), replace 1 through 5 with "WEEKDAY" and
    6 and 7 with "WEEKEND"
"""
pass
class SummaryFilter(ValueOp):
expr = Arg(rlz.noop)
def output_type(self):
return dt.boolean.column_type()
class TopK(ValueOp):
arg = Arg(rlz.noop)
k = Arg(int)
by = Arg(rlz.noop)
def __init__(self, arg, k, by=None):
if by is None:
by = arg.count()
if not isinstance(arg, ir.ColumnExpr):
raise TypeError(arg)
if not isinstance(k, int) or k < 0:
            raise ValueError(
                'k must be a non-negative integer, was: {0}'.format(k)
            )
super().__init__(arg, k, by)
def output_type(self):
return ir.TopKExpr
def blocks(self):
return True
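# Illustrative: TopK(arg, 5) with `by` omitted defaults the ranking metric to
# arg.count(), per __init__ above.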
class Constant(ValueOp):
pass
class TimestampNow(Constant):
def output_type(self):
return dt.timestamp.scalar_type()
class RandomScalar(Constant):
def output_type(self):
return dt.float64.scalar_type()
class E(Constant):
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class Pi(Constant):
"""
The constant pi
"""
def output_type(self):
return functools.partial(ir.FloatingScalar, dtype=dt.float64)
class TemporalUnaryOp(UnaryOp):
arg = Arg(rlz.temporal)
class TimestampUnaryOp(UnaryOp):
arg = Arg(rlz.timestamp)
_date_units = {
'Y': 'Y',
'y': 'Y',
'year': 'Y',
'YEAR': 'Y',
'YYYY': 'Y',
'SYYYY': 'Y',
'YYY': 'Y',
'YY': 'Y',
'Q': 'Q',
'q': 'Q',
'quarter': 'Q',
'QUARTER': 'Q',
'M': 'M',
'month': 'M',
'MONTH': 'M',
'w': 'W',
'W': 'W',
'week': 'W',
'WEEK': 'W',
'd': 'D',
'D': 'D',
'J': 'D',
'day': 'D',
'DAY': 'D',
}
_time_units = {
'h': 'h',
'H': 'h',
'HH24': 'h',
'hour': 'h',
'HOUR': 'h',
'm': 'm',
'MI': 'm',
'minute': 'm',
'MINUTE': 'm',
's': 's',
'second': 's',
'SECOND': 's',
'ms': 'ms',
'millisecond': 'ms',
'MILLISECOND': 'ms',
'us': 'us',
    'microsecond': 'us',
    'MICROSECOND': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'NANOSECOND': 'ns',
}
_timestamp_units = toolz.merge(_date_units, _time_units)
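# For example, _timestamp_units maps both 'YYYY' and 'year' to 'Y', and both
# 'MI' and 'minute' to 'm', so the truncate ops below accept any of the
# spellings defined above.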
class TimestampTruncate(ValueOp):
arg = Arg(rlz.timestamp)
unit = Arg(rlz.isin(_timestamp_units))
output_type = rlz.shape_like('arg', dt.timestamp)
class DateTruncate(ValueOp):
arg = Arg(rlz.date)
unit = Arg(rlz.isin(_date_units))
output_type = rlz.shape_like('arg', dt.date)
class TimeTruncate(ValueOp):
arg = Arg(rlz.time)
unit = Arg(rlz.isin(_time_units))
output_type = rlz.shape_like('arg', dt.time)
class Strftime(ValueOp):
arg = Arg(rlz.temporal)
format_str = Arg(rlz.string)
output_type = rlz.shape_like('arg', dt.string)
class StringToTimestamp(ValueOp):
arg = Arg(rlz.string)
format_str = Arg(rlz.string)
timezone = Arg(rlz.string, default=None)
output_type = rlz.shape_like('arg', dt.Timestamp(timezone='UTC'))
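# Illustrative: StringToTimestamp with arg '20210101' and format_str '%Y%m%d'
# would parse the string into a timestamp; per the rule above, the output
# dtype is always timezone-aware (UTC).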
class ExtractTemporalField(TemporalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
ExtractTimestampField = ExtractTemporalField
class ExtractDateField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
class ExtractTimeField(ExtractTemporalField):
arg = Arg(rlz.one_of([rlz.time, rlz.timestamp]))
class ExtractYear(ExtractDateField):
pass
class ExtractMonth(ExtractDateField):
pass
class ExtractDay(ExtractDateField):
pass
class ExtractDayOfYear(ExtractDateField):
pass
class ExtractQuarter(ExtractDateField):
pass
class ExtractEpochSeconds(ExtractDateField):
pass
class ExtractWeekOfYear(ExtractDateField):
pass
class ExtractHour(ExtractTimeField):
pass
class ExtractMinute(ExtractTimeField):
pass
class ExtractSecond(ExtractTimeField):
pass
class ExtractMillisecond(ExtractTimeField):
pass
class DayOfWeekIndex(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.int16)
class DayOfWeekName(UnaryOp):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
output_type = rlz.shape_like('arg', dt.string)
class DayOfWeekNode(Node):
arg = Arg(rlz.one_of([rlz.date, rlz.timestamp]))
def output_type(self):
return ir.DayOfWeek
class Time(UnaryOp):
output_type = rlz.shape_like('arg', dt.time)
class Date(UnaryOp):
output_type = rlz.shape_like('arg', dt.date)
class TimestampFromUNIX(ValueOp):
arg = Arg(rlz.any)
# Only pandas-based backends support 'ns'
unit = Arg(rlz.isin({'s', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('arg', dt.timestamp)
class DecimalUnaryOp(UnaryOp):
arg = Arg(rlz.decimal)
class DecimalPrecision(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class DecimalScale(DecimalUnaryOp):
output_type = rlz.shape_like('arg', dt.int32)
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of({rlz.value(dt.string), rlz.value(dt.binary)}))
how = Arg(rlz.isin({'md5', 'sha1', 'sha256', 'sha512'}))
output_type = rlz.shape_like('arg', dt.binary)
class DateAdd(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateSub(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.interval(units={'Y', 'Q', 'M', 'W', 'D'}))
output_type = rlz.shape_like('left')
class DateDiff(BinaryOp):
left = Arg(rlz.date)
right = Arg(rlz.date)
output_type = rlz.shape_like('left', dt.Interval('D'))
class TimeAdd(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeSub(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.interval(units={'h', 'm', 's', 'ms', 'us', 'ns'}))
output_type = rlz.shape_like('left')
class TimeDiff(BinaryOp):
left = Arg(rlz.time)
right = Arg(rlz.time)
output_type = rlz.shape_like('left', dt.Interval('s'))
class TimestampAdd(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampSub(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(
rlz.interval(
units={'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
)
)
output_type = rlz.shape_like('left')
class TimestampDiff(BinaryOp):
left = Arg(rlz.timestamp)
right = Arg(rlz.timestamp)
output_type = rlz.shape_like('left', dt.Interval('s'))
class IntervalBinaryOp(BinaryOp):
def output_type(self):
args = [
arg.cast(arg.type().value_type)
if isinstance(arg.type(), dt.Interval)
else arg
for arg in self.args
]
expr = rlz.numeric_like(args, self.__class__.op)(self)
left_dtype = self.left.type()
dtype_type = type(left_dtype)
additional_args = {
attr: getattr(left_dtype, attr)
for attr in dtype_type.__slots__
if attr not in {'unit', 'value_type'}
}
dtype = dtype_type(left_dtype.unit, expr.type(), **additional_args)
return rlz.shape_like(self.args, dtype=dtype)
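# Sketch of the promotion above: adding Interval('s', int32) to
# Interval('s', int64) casts both operands to their integer value types,
# applies the operator's numeric promotion rule (int64 here), and re-wraps
# the result as Interval('s', int64) using the left operand's unit.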
class IntervalAdd(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.add
class IntervalSubtract(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.interval)
op = operator.sub
class IntervalMultiply(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.mul
class IntervalFloorDivide(IntervalBinaryOp):
left = Arg(rlz.interval)
right = Arg(rlz.numeric)
op = operator.floordiv
class IntervalFromInteger(ValueOp):
arg = Arg(rlz.integer)
unit = Arg(
rlz.isin({'Y', 'Q', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'})
)
@property
def resolution(self):
return dt.Interval(self.unit).resolution
def output_type(self):
dtype = dt.Interval(self.unit, self.arg.type())
return rlz.shape_like(self.arg, dtype=dtype)
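# Illustrative: converting an int16 expression with unit 'D' yields dtype
# Interval(unit='D', value_type=int16), shaped like the argument (column or
# scalar).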
class ArrayColumn(ValueOp):
cols = Arg(rlz.list_of(rlz.column(rlz.any), min_length=1))
def _validate(self):
if len({col.type() for col in self.cols}) > 1:
raise com.IbisTypeError(
f'The types of all input columns must match exactly in a '
f'{type(self).__name__} operation.'
)
def output_type(self):
first_dtype = self.cols[0].type()
return dt.Array(first_dtype).column_type()
class ArrayLength(UnaryOp):
arg = Arg(rlz.array)
output_type = rlz.shape_like('arg', dt.int64)
class ArraySlice(ValueOp):
arg = Arg(rlz.array)
start = Arg(rlz.integer)
stop = Arg(rlz.integer, default=None)
output_type = rlz.typeof('arg')
class ArrayIndex(ValueOp):
arg = Arg(rlz.array)
index = Arg(rlz.integer)
def output_type(self):
value_dtype = self.arg.type().value_type
return rlz.shape_like(self.arg, value_dtype)
class ArrayConcat(ValueOp):
left = Arg(rlz.array)
right = Arg(rlz.array)
output_type = rlz.shape_like('left')
def _validate(self):
left_dtype, right_dtype = self.left.type(), self.right.type()
if left_dtype != right_dtype:
raise com.IbisTypeError(
'Array types must match exactly in a {} operation. '
'Left type {} != Right type {}'.format(
type(self).__name__, left_dtype, right_dtype
)
)
class ArrayRepeat(ValueOp):
arg = Arg(rlz.array)
times = Arg(rlz.integer)
output_type = rlz.typeof('arg')
class ArrayCollect(Reduction):
arg = Arg(rlz.column(rlz.any))
def output_type(self):
dtype = dt.Array(self.arg.type())
return dtype.scalar_type()
class MapLength(ValueOp):
arg = Arg(rlz.mapping)
output_type = rlz.shape_like('arg', dt.int64)
class MapValueForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
def output_type(self):
return rlz.shape_like(tuple(self.args), self.arg.type().value_type)
class MapValueOrDefaultForKey(ValueOp):
arg = Arg(rlz.mapping)
key = Arg(rlz.one_of([rlz.string, rlz.integer]))
default = Arg(rlz.any)
def output_type(self):
arg = self.arg
default = self.default
map_type = arg.type()
value_type = map_type.value_type
default_type = default.type()
if default is not None and not dt.same_kind(default_type, value_type):
raise com.IbisTypeError(
"Default value\n{}\nof type {} cannot be cast to map's value "
"type {}".format(default, default_type, value_type)
)
result_type = dt.highest_precedence((default_type, value_type))
return rlz.shape_like(tuple(self.args), result_type)
class MapKeys(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().key_type))
class MapValues(ValueOp):
arg = Arg(rlz.mapping)
def output_type(self):
arg = self.arg
return rlz.shape_like(arg, dt.Array(arg.type().value_type))
class MapConcat(ValueOp):
left = Arg(rlz.mapping)
right = Arg(rlz.mapping)
output_type = rlz.typeof('left')
class StructField(ValueOp):
arg = Arg(rlz.struct)
field = Arg(str)
def output_type(self):
struct_dtype = self.arg.type()
value_dtype = struct_dtype[self.field]
return rlz.shape_like(self.arg, value_dtype)
class Literal(ValueOp):
value = Arg(rlz.noop)
dtype = Arg(dt.dtype)
def __repr__(self):
return '{}({})'.format(
type(self).__name__, ', '.join(map(repr, self.args))
)
def equals(self, other, cache=None):
# Check types
if not (
isinstance(other, Literal)
and isinstance(other.value, type(self.value))
and self.dtype == other.dtype
):
return False
# Check values
if isinstance(self.value, np.ndarray):
return np.array_equal(self.value, other.value)
else:
return self.value == other.value
def output_type(self):
return self.dtype.scalar_type()
def root_tables(self):
return []
def __hash__(self) -> int:
"""Return the hash of a literal value.
We override this method to make sure that we can handle things that
aren't eminently hashable like an ``array<array<int64>>``.
"""
return hash(self.dtype._literal_value_hash_key(self.value))
class NullLiteral(Literal):
"""Typeless NULL literal"""
value = Arg(type(None), default=None)
dtype = Arg(dt.Null, default=dt.null)
class ScalarParameter(ValueOp):
_counter = itertools.count()
dtype = Arg(dt.dtype)
counter = Arg(int, default=lambda: next(ScalarParameter._counter))
def resolve_name(self):
return 'param_{:d}'.format(self.counter)
def __repr__(self):
return '{}(type={})'.format(type(self).__name__, self.dtype)
def __hash__(self):
return hash((self.dtype, self.counter))
def output_type(self):
return self.dtype.scalar_type()
def equals(self, other, cache=None):
return (
isinstance(other, ScalarParameter)
and self.counter == other.counter
and self.dtype.equals(other.dtype, cache=cache)
)
@property
def inputs(self):
return ()
def root_tables(self):
return []
class ExpressionList(Node):
"""Data structure for a list of arbitrary expressions"""
exprs = Arg(rlz.noop)
def __init__(self, values):
super().__init__(list(map(rlz.any, values)))
@property
def inputs(self):
return (tuple(self.exprs),)
def root_tables(self):
return distinct_roots(self.exprs)
def output_type(self):
return ir.ExprList
class ValueList(ValueOp):
"""Data structure for a list of value expressions"""
values = Arg(rlz.noop)
display_argnames = False # disable showing argnames in repr
def __init__(self, values):
super().__init__(tuple(map(rlz.any, values)))
def output_type(self):
dtype = rlz.highest_precedence_dtype(self.values)
return functools.partial(ir.ListExpr, dtype=dtype)
def root_tables(self):
return distinct_roots(*self.values)
# ----------------------------------------------------------------------
# GeoSpatial operations
class GeoSpatialBinOp(BinaryOp):
"""Geo Spatial base binary"""
left = Arg(rlz.geospatial)
right = Arg(rlz.geospatial)
class GeoSpatialUnOp(UnaryOp):
"""Geo Spatial base unary"""
arg = Arg(rlz.geospatial)
class GeoDistance(GeoSpatialBinOp):
"""Returns minimum distance between two geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoContains(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoContainsProperly(GeoSpatialBinOp):
"""Check if the first geo spatial data contains the second one,
and no boundary points are shared."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCovers(GeoSpatialBinOp):
"""Returns True if no point in Geometry B is outside Geometry A"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCoveredBy(GeoSpatialBinOp):
"""Returns True if no point in Geometry/Geography A is
outside Geometry/Geography B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoCrosses(GeoSpatialBinOp):
"""Returns True if the supplied geometries have some, but not all,
interior points in common."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoDisjoint(GeoSpatialBinOp):
"""Returns True if the Geometries do not “spatially intersect” -
if they do not share any space together."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoEquals(GeoSpatialBinOp):
"""Returns True if the given geometries represent the same geometry."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoGeometryN(GeoSpatialUnOp):
"""Returns the Nth Geometry of a Multi geometry."""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoGeometryType(GeoSpatialUnOp):
"""Returns the type of the geometry."""
output_type = rlz.shape_like('args', dt.string)
class GeoIntersects(GeoSpatialBinOp):
"""Returns True if the Geometries/Geography “spatially intersect in 2D”
- (share any portion of space) and False if they don’t (they are Disjoint).
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIsValid(GeoSpatialUnOp):
"""Returns true if the geometry is well-formed."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoLineLocatePoint(GeoSpatialBinOp):
"""
Locate the distance a point falls along the length of a line.
Returns a float between zero and one representing the location of the
closest point on the linestring to the given point, as a fraction of the
total 2d line length.
"""
left = Arg(rlz.linestring)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.halffloat)
class GeoLineMerge(GeoSpatialUnOp):
"""
Merge a MultiLineString into a LineString.
Returns a (set of) LineString(s) formed by sewing together the
constituent line work of a multilinestring. If a geometry other than
a linestring or multilinestring is given, this will return an empty
geometry collection.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoLineSubstring(GeoSpatialUnOp):
"""
Clip a substring from a LineString.
Returns a linestring that is a substring of the input one, starting
and ending at the given fractions of the total 2d length. The second
and third arguments are floating point values between zero and one.
This only works with linestrings.
"""
arg = Arg(rlz.linestring)
start = Arg(rlz.floating)
end = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.linestring)
class GeoOrderingEquals(GeoSpatialBinOp):
"""
Check if two geometries are equal and have the same point ordering.
Returns true if the two geometries are equal and the coordinates
are in the same order.
"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoOverlaps(GeoSpatialBinOp):
"""Returns True if the Geometries share space, are of the same dimension,
but are not completely contained by each other."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoTouches(GeoSpatialBinOp):
"""Returns True if the geometries have at least one point in common,
but their interiors do not intersect."""
output_type = rlz.shape_like('args', dt.boolean)
class GeoUnaryUnion(Reduction):
"""Returns the pointwise union of the geometries in the column."""
arg = Arg(rlz.column(rlz.geospatial))
def output_type(self):
return dt.geometry.scalar_type()
class GeoUnion(GeoSpatialBinOp):
"""Returns the pointwise union of the two geometries."""
output_type = rlz.shape_like('args', dt.geometry)
class GeoArea(GeoSpatialUnOp):
"""Area of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoPerimeter(GeoSpatialUnOp):
"""Perimeter of the geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoLength(GeoSpatialUnOp):
"""Length of geo spatial data"""
output_type = rlz.shape_like('args', dt.float64)
class GeoMaxDistance(GeoSpatialBinOp):
"""Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoX(GeoSpatialUnOp):
"""Return the X coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoY(GeoSpatialUnOp):
"""Return the Y coordinate of the point, or NULL if not available.
Input must be a point
"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoXMax(GeoSpatialUnOp):
"""Returns X maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMin(GeoSpatialUnOp):
"""Returns Y minima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoYMax(GeoSpatialUnOp):
"""Returns Y maxima of a bounding box 2d or 3d or a geometry"""
output_type = rlz.shape_like('args', dt.float64)
class GeoStartPoint(GeoSpatialUnOp):
"""Returns the first point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoEndPoint(GeoSpatialUnOp):
"""Returns the last point of a LINESTRING geometry as a POINT or
NULL if the input parameter is not a LINESTRING
"""
output_type = rlz.shape_like('arg', dt.point)
class GeoPoint(GeoSpatialBinOp):
"""
Return a point constructed on the fly from the provided coordinate values.
Constant coordinates result in construction of a POINT literal.
"""
left = Arg(rlz.numeric)
right = Arg(rlz.numeric)
output_type = rlz.shape_like('args', dt.point)
class GeoPointN(GeoSpatialUnOp):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
"""
n = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.point)
class GeoNPoints(GeoSpatialUnOp):
"""Return the number of points in a geometry. Works for all geometries"""
output_type = rlz.shape_like('args', dt.int64)
class GeoNRings(GeoSpatialUnOp):
"""If the geometry is a polygon or multi-polygon returns the number of
rings. It counts the outer rings as well
"""
output_type = rlz.shape_like('args', dt.int64)
class GeoSRID(GeoSpatialUnOp):
"""Returns the spatial reference identifier for the ST_Geometry."""
output_type = rlz.shape_like('args', dt.int64)
class GeoSetSRID(GeoSpatialUnOp):
"""Set the spatial reference identifier for the ST_Geometry."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('args', dt.geometry)
class GeoBuffer(GeoSpatialUnOp):
"""Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry.
"""
radius = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.geometry)
class GeoCentroid(GeoSpatialUnOp):
"""Returns the geometric center of a geometry."""
output_type = rlz.shape_like('arg', dt.point)
class GeoDFullyWithin(GeoSpatialBinOp):
"""Returns True if the geometries are fully within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoDWithin(GeoSpatialBinOp):
"""Returns True if the geometries are within the specified distance
of one another.
"""
distance = Arg(rlz.floating)
output_type = rlz.shape_like('args', dt.boolean)
class GeoEnvelope(GeoSpatialUnOp):
"""Returns a geometry representing the boundingbox of the supplied geometry.
"""
output_type = rlz.shape_like('arg', dt.polygon)
class GeoAzimuth(GeoSpatialBinOp):
"""Returns the angle in radians from the horizontal of the vector defined
by pointA and pointB. Angle is computed clockwise from down-to-up:
on the clock: 12=0; 3=PI/2; 6=PI; 9=3PI/2.
"""
left = Arg(rlz.point)
right = Arg(rlz.point)
output_type = rlz.shape_like('args', dt.float64)
class GeoWithin(GeoSpatialBinOp):
"""Returns True if the geometry A is completely inside geometry B"""
output_type = rlz.shape_like('args', dt.boolean)
class GeoIntersection(GeoSpatialBinOp):
"""Returns a geometry that represents the point set intersection
of the Geometries.
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoDifference(GeoSpatialBinOp):
"""Returns a geometry that represents that part of geometry A
that does not intersect with geometry B
"""
output_type = rlz.shape_like('args', dt.geometry)
class GeoSimplify(GeoSpatialUnOp):
"""Returns a simplified version of the given geometry."""
tolerance = Arg(rlz.floating)
preserve_collapsed = Arg(rlz.boolean)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoTransform(GeoSpatialUnOp):
"""Returns a transformed version of the given geometry into a new SRID."""
srid = Arg(rlz.integer)
output_type = rlz.shape_like('arg', dt.geometry)
class GeoAsBinary(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography without SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKB(GeoSpatialUnOp):
"""Return the Well-Known Binary (WKB) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.binary)
class GeoAsEWKT(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography with SRID meta data.
"""
output_type = rlz.shape_like('arg', dt.string)
class GeoAsText(GeoSpatialUnOp):
"""Return the Well-Known Text (WKT) representation of the
geometry/geography without SRID metadata.
"""
output_type = rlz.shape_like('arg', dt.string)
class ElementWiseVectorizedUDF(ValueOp):
"""Node for element wise UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ReductionVectorizedUDF(Reduction):
"""Node for reduction UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.scalar_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class AnalyticVectorizedUDF(AnalyticOp):
"""Node for analytics UDF."""
func = Arg(callable)
func_args = Arg(tuple)
input_type = Arg(rlz.shape_like('func_args'))
_output_type = Arg(rlz.noop)
def __init__(self, func, args, input_type, output_type):
self.func = func
self.func_args = args
self.input_type = input_type
self._output_type = output_type
@property
def inputs(self):
return self.func_args
def output_type(self):
return self._output_type.column_type()
def root_tables(self):
return distinct_roots(*self.func_args)
class ExistsSubquery(Node):
"""Helper class"""
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
class NotExistsSubquery(Node):
foreign_table = Arg(rlz.noop)
predicates = Arg(rlz.noop)
def output_type(self):
return ir.ExistsExpr
|
get
|
Get an existing Schedule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Schedule']
class Schedule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]] = None,
hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_settings: Optional[pulumi.Input[pulumi.InputType['NotificationSettingsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'EnableStatus']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None,
task_type: Optional[pulumi.Input[str]] = None,
time_zone_id: Optional[pulumi.Input[str]] = None,
weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A schedule.
API Version: 2018-09-15.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: If the schedule will occur once each day of the week, specify the daily recurrence.
:param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: If the schedule will occur multiple times a day, specify the hourly recurrence.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the schedule.
:param pulumi.Input[pulumi.InputType['NotificationSettingsArgs']] notification_settings: Notification settings.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule (i.e. Enabled, Disabled)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] target_resource_id: The resource ID to which the schedule belongs
:param pulumi.Input[str] task_type: The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
:param pulumi.Input[str] time_zone_id: The time zone ID (e.g. Pacific Standard time).
:param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['daily_recurrence'] = daily_recurrence
__props__['hourly_recurrence'] = hourly_recurrence
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
__props__['notification_settings'] = notification_settings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['tags'] = tags
__props__['target_resource_id'] = target_resource_id
__props__['task_type'] = task_type
__props__['time_zone_id'] = time_zone_id
__props__['weekly_recurrence'] = weekly_recurrence
__props__['created_date'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['unique_identifier'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab/latest:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:Schedule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Schedule, __self__).__init__(
'azure-nextgen:devtestlab:Schedule',
resource_name,
__props__,
opts)
# MASKED: get function (lines 103-119)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
"""
The creation date of the schedule.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="dailyRecurrence")
def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]:
"""
If the schedule will occur once each day of the week, specify the daily recurrence.
"""
return pulumi.get(self, "daily_recurrence")
@property
@pulumi.getter(name="hourlyRecurrence")
def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]:
"""
If the schedule will occur multiple times a day, specify the hourly recurrence.
"""
return pulumi.get(self, "hourly_recurrence")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationSettings")
def notification_settings(self) -> pulumi.Output[Optional['outputs.NotificationSettingsResponse']]:
"""
Notification settings.
"""
return pulumi.get(self, "notification_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
The status of the schedule (i.e. Enabled, Disabled)
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID to which the schedule belongs
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Output[Optional[str]]:
"""
The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
"""
return pulumi.get(self, "task_type")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> pulumi.Output[Optional[str]]:
"""
The time zone ID (e.g. Pacific Standard time).
"""
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
@property
@pulumi.getter(name="weeklyRecurrence")
def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]:
"""
If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
return pulumi.get(self, "weekly_recurrence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Schedule':
"""
Get an existing Schedule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Schedule(resource_name, opts=opts, __props__=__props__)
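    # Illustrative usage (resource name and ID below are hypothetical):
    #   sched = Schedule.get('my-schedule', id=schedule_resource_id)
    # where schedule_resource_id is the provider-assigned Azure resource ID
    # of an existing schedule.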
| 103
| 119
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Schedule']
class Schedule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]] = None,
hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_settings: Optional[pulumi.Input[pulumi.InputType['NotificationSettingsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'EnableStatus']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None,
task_type: Optional[pulumi.Input[str]] = None,
time_zone_id: Optional[pulumi.Input[str]] = None,
weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A schedule.
API Version: 2018-09-15.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: If the schedule will occur once each day of the week, specify the daily recurrence.
:param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: If the schedule will occur multiple times a day, specify the hourly recurrence.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the schedule.
:param pulumi.Input[pulumi.InputType['NotificationSettingsArgs']] notification_settings: Notification settings.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule (i.e. Enabled, Disabled)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] target_resource_id: The resource ID to which the schedule belongs
:param pulumi.Input[str] task_type: The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
:param pulumi.Input[str] time_zone_id: The time zone ID (e.g. Pacific Standard time).
:param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['daily_recurrence'] = daily_recurrence
__props__['hourly_recurrence'] = hourly_recurrence
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
__props__['notification_settings'] = notification_settings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['tags'] = tags
__props__['target_resource_id'] = target_resource_id
__props__['task_type'] = task_type
__props__['time_zone_id'] = time_zone_id
__props__['weekly_recurrence'] = weekly_recurrence
__props__['created_date'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['unique_identifier'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab/latest:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:Schedule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Schedule, __self__).__init__(
'azure-nextgen:devtestlab:Schedule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Schedule':
"""
Get an existing Schedule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Schedule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
"""
The creation date of the schedule.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="dailyRecurrence")
def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]:
"""
If the schedule will occur once each day of the week, specify the daily recurrence.
"""
return pulumi.get(self, "daily_recurrence")
@property
@pulumi.getter(name="hourlyRecurrence")
def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]:
"""
If the schedule will occur multiple times a day, specify the hourly recurrence.
"""
return pulumi.get(self, "hourly_recurrence")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationSettings")
def notification_settings(self) -> pulumi.Output[Optional['outputs.NotificationSettingsResponse']]:
"""
Notification settings.
"""
return pulumi.get(self, "notification_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
The status of the schedule (i.e. Enabled, Disabled)
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID to which the schedule belongs
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Output[Optional[str]]:
"""
The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
"""
return pulumi.get(self, "task_type")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> pulumi.Output[Optional[str]]:
"""
The time zone ID (e.g. Pacific Standard time).
"""
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
@property
@pulumi.getter(name="weeklyRecurrence")
def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]:
"""
If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
return pulumi.get(self, "weekly_recurrence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
check_import_stdlib
|
Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or is one of the App template modules.
|
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
                            f'(module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
                        f'(module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
This method will check only files ending in **.py** and does not handle imports validation
for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
# MASKED: check_import_stdlib function (lines 127-145)
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
            # if the module resolves from dist-packages/site-packages, the
            # import doesn't count (the dependency must be bundled with the App)
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
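    # Illustrative sketch (not in the original source): a module bundled with
    # the App resolves from the App's own lib directory, so check_imported
    # returns True, while a module whose spec origin lives under dist-packages
    # or site-packages returns False, flagging a dependency that was not
    # bundled with the App.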
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use developer defined app version (deprecated) or package_version from InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
                # no need to check the schema if the json is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
            self.invalid_json_files.append(self.lj.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
The layout.json files references the params.name from the install.json file. The method
will validate that no reference appear for inputs in install.json that don't exist.
"""
# do not track hidden or serviceConfig inputs as they should not be in layouts.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item in list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
                'values from install.json were not included in layout.json).'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
# set a non-zero exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
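A minimal sketch (not part of the record above) of the payload shape that print_json emits, built from the _validation_data structure; the filename, module, and error text below are hypothetical:
import json
validation_data = {
    'errors': ['Syntax validation failed for app.py (invalid syntax).'],
    'fileSyntax': [{'filename': 'app.py', 'status': False}],
    'layouts': [],
    'moduleImports': [{'filename': 'app.py', 'module': 'requests', 'status': True}],
    'schema': [{'filename': 'install.json', 'status': True}],
    'feeds': [],
}
print(json.dumps({'validation_data': validation_data}))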
|
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: True if the module is in the stdlib or is an app template module.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
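A standalone sketch of the same stdlib-or-template check, assuming only that the stdlib_list package is installed; expected results are noted in the trailing comments:
from stdlib_list import stdlib_list
TEMPLATE_MODULES = ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
def is_stdlib_or_template(module: str) -> bool:
    # mirror the check above: stdlib for any supported Python version, or a template module
    return any(module in stdlib_list(v) for v in ('3.6', '3.7', '3.8')) or module in TEMPLATE_MODULES
print(is_stdlib_or_template('json'))      # True  (stdlib)
print(is_stdlib_or_template('colorama'))  # False (third-party)
print(is_stdlib_or_template('run'))       # True  (template module)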
| 127
| 145
|
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
This method will check only files ending in **.py** and does not handle imports validation
for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: True if the module is in the stdlib or is an app template module.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
# if dist-packages|site-packages in module_path the import doesn't count
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use the developer-defined app version (deprecated) or package_version from the InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
# no need to check the schema if the JSON is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.lj.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
The layout.json file references params by name from the install.json file. This method
validates that layout.json does not reference inputs that are missing from install.json.
"""
# do not track hidden or serviceConfig inputs as they should not be in layout.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item in list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
'values from install.json were not included in layout.json).'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
# set a non-zero exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
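The display-clause validation in check_layout_params simply asks SQLite to parse the clause against a temporary inputs table. A self-contained sketch of that idea follows; the table name and columns are hypothetical stand-ins for what permutations.db_create_table would build:
import sqlite3
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE inputs (tc_action TEXT, confidence_rating TEXT)')
def display_clause_is_valid(clause: str) -> bool:
    try:
        # same trick as the module above: strip double quotes, let SQLite parse the WHERE clause
        conn.execute(f'SELECT * FROM inputs WHERE {clause}'.replace('"', ''))  # nosec
        return True
    except sqlite3.Error:
        return False
print(display_clause_is_valid("tc_action in ('Create', 'Delete')"))  # True
print(display_clause_is_valid("tc_action ==== 'Create'"))            # False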
|
check_imported
|
Check whether the provided module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
|
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
This method will check only files ending in **.py** and does not handle imports validation
for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: True if the module is in the stdlib or is an app template module.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
# MASKED: check_imported function (lines 147-178)
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use the developer-defined app version (deprecated) or package_version from the InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
# no need to check the schema if the JSON is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.lj.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
The layout.json file references params by name from the install.json file. This method
validates that layout.json does not reference inputs that are missing from install.json.
"""
# do not track hidden or serviceConfig inputs as they should not be in layout.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item in list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
'values from install.json were not included in layout.json).'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
# set a non-zero exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
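The interactive mode above is a plain line-oriented protocol on stdin. A minimal standalone rendition of the same loop; the JSON payload printed here is a placeholder, not the real output:
import sys
def command_loop(handle_validate) -> None:
    # one command per line: 'validate' runs the checks, 'quit' ends the session
    for line in sys.stdin:
        cmd = line.strip()
        if cmd == 'quit':
            return
        if cmd == 'validate':
            handle_validate()
if __name__ == '__main__':
    # e.g.: printf 'validate\nquit\n' | python sketch.py
    command_loop(lambda: print('{"validation_data": {"errors": []}}'))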
|
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
# if dist-packages|site-packages in module_path the import doesn't count
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
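A condensed sketch of the same availability check; note that, as in the implementation above, a module resolved from dist-packages or site-packages is deliberately treated as not found:
import importlib.util
def module_available(module: str) -> bool:
    spec = importlib.util.find_spec(module)
    if spec is None:
        return False
    # spec.origin may be None (e.g. namespace packages), hence the fallback
    origin = spec.origin or ''
    return 'dist-packages' not in origin and 'site-packages' not in origin
print(module_available('json'))            # True  (stdlib)
print(module_available('no_such_module'))  # False (cannot be located)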
| 147
| 178
|
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
This method will check only files ending in **.py** and does not handle imports validation
for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: True if the module is in the stdlib or is an app template module.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
# if dist-packages|site-packages in module_path the import doesn't count
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use the developer-defined app version (deprecated) or package_version from the InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
# no need to check the schema if the JSON is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.lj.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
The layout.json file references params by name from the install.json file. This method
validates that layout.json does not reference inputs that are missing from install.json.
"""
# do not track hidden or serviceConfig inputs as they should not be in layout.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item in list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
'values from install.json were not included in layout.json).'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
# set a non-zero exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
|
check_syntax
|
Run syntax validation on each ".py" and ".json" file.
Args:
app_path (str, optional): The path containing the app's Python and JSON files.
|
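A minimal standalone sketch of the per-file syntax check this record's function performs, using ast.parse for Python files and json.loads for JSON files (run it from any directory):
import ast
import json
from pathlib import Path
def file_syntax_ok(path: Path) -> bool:
    if path.suffix == '.py':
        try:
            ast.parse(path.read_bytes(), filename=path.name)
        except SyntaxError:
            return False
    elif path.suffix == '.json':
        try:
            json.loads(path.read_text())
        except ValueError:
            return False
    return True
for fqfn in sorted(Path('.').iterdir()):
    if fqfn.suffix in ('.py', '.json'):
        print(fqfn.name, 'passed' if file_syntax_ok(fqfn) else 'failed')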
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
This method will check only files ending in **.py** and does not handle imports validation
for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: True if the module is in the stdlib or is an app template module.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
# if dist-packages|site-packages in module_path the import doesn't count
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use the developer-defined app version (deprecated) or package_version from the InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
# no need to check the schema if the JSON is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.lj.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
The layout.json file references the params.name values from the install.json file. This
method validates that layout.json contains no reference to an input that does not exist
in install.json.
"""
# do not track hidden or serviceConfig inputs as they should not be in layout.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item remaining in this list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
'values from install.json were not included in layout.json).'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
# MASKED: check_syntax function (lines 391-435)
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
# set the exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
|
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
| 391
| 435
|
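Below is a minimal, self-contained sketch of the same ast/json syntax-check pattern used by check_syntax above; the helper name file_syntax_ok is hypothetical, and only the standard library is assumed.
# Hypothetical sketch of the ast/json syntax-check pattern in check_syntax.
import ast
import json
from pathlib import Path

def file_syntax_ok(path: Path) -> bool:
    """Return True if a .py or .json file parses cleanly."""
    if path.suffix == '.py':
        try:
            ast.parse(path.read_bytes(), filename=path.name)
        except SyntaxError:
            return False
    elif path.suffix == '.json':
        try:
            json.loads(path.read_text())
        except ValueError:
            return False
    return True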
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
"""Validate syntax, imports, and schemas.
* Python and JSON file syntax
* Python import modules
* install.json schema
* layout.json schema
"""
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
self.ignore_validation = ignore_validation
# class properties
self._app_packages = []
self._install_json_schema = None
self._layout_json_schema = None
self.config = {}
self.ij = InstallJson()
self.invalid_json_files = []
self.lj = LayoutJson()
self.tj = TcexJson()
# initialize validation data
self.validation_data = self._validation_data
@property
def _validation_data(self) -> Dict[str, list]:
"""Return structure for validation data."""
return {
'errors': [],
'fileSyntax': [],
'layouts': [],
'moduleImports': [],
'schema': [],
'feeds': [],
}
def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
"""."""
if isinstance(node, ast.Import):
for n in node.names:
m = n.name.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
elif isinstance(node, ast.ImportFrom):
m = node.module.split('.')[0]
if not self.check_import_stdlib(m):
m_status = self.check_imported(m)
if not m_status:
self.validation_data['errors'].append(
f'Module validation failed for {filename} '
f'module "{m}" could not be imported).'
)
self.validation_data['moduleImports'].append(
{'filename': filename, 'module': m, 'status': m_status}
)
def check_imports(self) -> None:
"""Check the projects top level directory for missing imports.
This method will check only files ending in **.py** and does not handle imports validation
for sub-directories.
"""
for filename in sorted(os.listdir(self.app_path)):
if not filename.endswith('.py'):
continue
fq_path = os.path.join(self.app_path, filename)
with open(fq_path, 'rb') as f:
# TODO: [low] is there a better way?
code_lines = deque([(f.read(), 1)])
while code_lines:
code, _ = code_lines.popleft() # pylint: disable=unused-variable
try:
parsed_code = ast.parse(code)
for node in ast.walk(parsed_code):
self._check_node_import(node, filename)
except SyntaxError:
pass
@staticmethod
def check_import_stdlib(module: str) -> bool:
"""Check if module is in Python stdlib.
Args:
module: The name of the module to check.
Returns:
bool: Returns True if the module is in the stdlib or is one of the App template module names.
"""
if (
module in stdlib_list('3.6')
or module in stdlib_list('3.7')
or module in stdlib_list('3.8')
or module
in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
):
return True
return False
@staticmethod
def check_imported(module: str) -> bool:
"""Check whether the provide module can be imported (package installed).
Args:
module: The name of the module to check availability.
Returns:
bool: True if the module can be imported, False otherwise.
"""
try:
del sys.modules[module]
except (AttributeError, KeyError):
pass
# https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
find_spec = importlib.util.find_spec(module)
found = find_spec is not None
if found is True:
# if dist-packages|site-packages in module_path the import doesn't count
try:
if 'dist-packages' in find_spec.origin:
found = False
except TypeError:
pass
try:
if 'site-packages' in find_spec.origin:
found = False
except TypeError:
pass
return found
def check_install_json(self) -> None:
"""Check all install.json files for valid schema."""
if 'install.json' in self.invalid_json_files:
return
status = True
try:
self.ij.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
'''Schema validation failed for install.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})
def check_job_json(self) -> None:
"""Validate feed files for feed job apps."""
if 'install.json' in self.invalid_json_files:
# can't proceed if install.json can't be read
return
# use developer defined app version (deprecated) or package_version from InstallJson model
app_version = self.tj.model.package.app_version or self.ij.model.package_version
program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')
status = True
for feed in self.ij.model.feeds:
if feed.job_file in self.invalid_json_files:
# no need to check the schema if the json is invalid
continue
jj = JobJson(filename=feed.job_file)
# validate the job file exists
if not jj.fqfn.is_file():
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json file could not be found.'''
)
continue
try:
# validate the schema
jj.model
except ValidationError as ex:
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
# validate program name
if status is True and jj.model.program_name != program_name:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. '''
f'''The job.json programName {jj.model.program_name} != {program_name}.'''
)
# validate program version
if status is True and jj.model.program_version != self.ij.model.program_version:
status = False
self.validation_data['errors'].append(
f'''Schema validation failed for {feed.job_file}. The job.json program'''
f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
)
self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})
def check_layout_json(self) -> None:
"""Check all layout.json files for valid schema."""
if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
return
status = True
try:
self.lj.model
except ValidationError as ex:
self.invalid_json_files.append(self.ij.fqfn.name)
status = False
for error in json.loads(ex.json()):
location = [str(location) for location in error.get('loc')]
self.validation_data['errors'].append(
f'''Schema validation failed for layout.json. '''
f'''{error.get('msg')}: {' -> '.join(location)}'''
)
except ValueError:
# any JSON decode error will be caught during syntax validation
return
self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})
if status is True:
self.check_layout_params()
def check_layout_params(self) -> None:
"""Check that the layout.json is consistent with install.json.
The layout.json file references the params.name values from the install.json file. This
method validates that layout.json contains no reference to an input that does not exist
in install.json.
"""
# do not track hidden or serviceConfig inputs as they should not be in layout.json
ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]
# Check for duplicate inputs
for name in self.ij.validate.validate_duplicate_input():
self.validation_data['errors'].append(
f'Duplicate input name found in install.json ({name})'
)
status = False
# Check for duplicate sequence numbers
for sequence in self.ij.validate.validate_duplicate_sequence():
self.validation_data['errors'].append(
f'Duplicate sequence number found in install.json ({sequence})'
)
status = False
# Check for duplicate outputs variables
for output in self.ij.validate.validate_duplicate_output():
self.validation_data['errors'].append(
f'Duplicate output variable name found in install.json ({output})'
)
status = False
if 'sqlite3' in sys.modules:
# create temporary inputs tables
self.permutations.db_create_table(self.permutations._input_table, ij_input_names)
# inputs
status = True
for i in self.lj.model.inputs:
for p in i.parameters:
if p.name not in ij_input_names:
# update validation data errors
self.validation_data['errors'].append(
'Layouts input.parameters[].name validations failed '
f'''("{p.get('name')}" is defined in layout.json, '''
'but hidden or not found in install.json).'
)
status = False
else:
# any item remaining in this list afterwards is a problem
ij_input_names.remove(p.name)
if 'sqlite3' in sys.modules:
if p.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table}''' # nosec
f''' WHERE {p.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
'''Layouts input.parameters[].display validations failed '''
f'''("{p.display}" query is an invalid statement).'''
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'inputs', 'status': status})
if ij_input_names:
input_names = ','.join(ij_input_names)
# update validation data errors
self.validation_data['errors'].append(
f'Layouts input.parameters[].name validations failed ("{input_names}" '
'values from install.json were not included in layout.json).'
)
status = False
# outputs
status = True
for o in self.lj.model.outputs:
if o.name not in ij_output_names:
# update validation data errors
self.validation_data['errors'].append(
f'''Layouts output validations failed ({o.name} is defined '''
'''in layout.json, but not found in install.json).'''
)
status = False
if 'sqlite3' in sys.modules:
if o.display:
display_query = (
f'''SELECT * FROM {self.permutations._input_table} ''' # nosec
f'''WHERE {o.display}'''
)
try:
self.permutations.db_conn.execute(display_query.replace('"', ''))
except sqlite3.Error:
self.validation_data['errors'].append(
f"""Layouts outputs.display validations failed ("{o.display}" """
f"""query is an invalid statement)."""
)
status = False
# update validation data for module
self.validation_data['layouts'].append({'params': 'outputs', 'status': status})
def check_syntax(self, app_path=None) -> None:
"""Run syntax on each ".py" and ".json" file.
Args:
app_path (str, optional): The path of Python files.
"""
fqpn = Path(app_path or os.getcwd())
for fqfn in sorted(fqpn.iterdir()):
error = None
status = True
if fqfn.name.endswith('.py'):
try:
with fqfn.open(mode='rb') as fh:
ast.parse(fh.read(), filename=fqfn.name)
except SyntaxError:
status = False
# cleanup output
e = []
for line in traceback.format_exc().split('\n')[-5:-2]:
e.append(line.strip())
error = ' '.join(e)
elif fqfn.name.endswith('.json'):
try:
with fqfn.open() as fh:
json.load(fh)
except ValueError as e:
# update tracker for common files
self.invalid_json_files.append(fqfn.name)
status = False
error = e
else:
# skip unsupported file types
continue
if error:
# update validation data errors
self.validation_data['errors'].append(
f'Syntax validation failed for {fqfn.name} ({error}).'
)
# store status for this file
self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})
def interactive(self) -> None:
"""[App Builder] Run in interactive mode."""
while True:
line = sys.stdin.readline().strip()
if line == 'quit':
sys.exit()
elif line == 'validate':
self.check_syntax()
self.check_imports()
self.check_install_json()
self.check_layout_json()
self.check_job_json()
self.print_json()
# reset validation_data
self.validation_data = self._validation_data
def print_json(self) -> None:
"""[App Builder] Print JSON output."""
print(json.dumps({'validation_data': self.validation_data}))
# TODO: [low] switch to typer echo?
def _print_file_syntax_results(self) -> None:
"""Print file syntax results."""
if self.validation_data.get('fileSyntax'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('fileSyntax'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")
def _print_imports_results(self) -> None:
"""Print import results."""
if self.validation_data.get('moduleImports'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
for f in self.validation_data.get('moduleImports'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(
f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
)
def _print_schema_results(self) -> None:
"""Print schema results."""
if self.validation_data.get('schema'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('schema'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')
def _print_layouts_results(self) -> None:
"""Print layout results."""
if self.validation_data.get('layouts'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('layouts'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")
def _print_feed_results(self) -> None:
"""Print feed results."""
if self.validation_data.get('feeds'):
print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
for f in self.validation_data.get('feeds'):
status_color = self.status_color(f.get('status'))
status_value = self.status_value(f.get('status'))
print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")
def _print_errors(self) -> None:
"""Print errors results."""
if self.validation_data.get('errors'):
print('\n') # separate errors from normal output
for error in self.validation_data.get('errors'):
# print all errors
print(f'* {c.Fore.RED}{error}')
# set the exit code unless validation errors are ignored
if not self.ignore_validation:
self.exit_code = 1
def print_results(self) -> None:
"""Print results."""
# Validating Syntax
self._print_file_syntax_results()
# Validating Imports
self._print_imports_results()
# Validating Schema
self._print_schema_results()
# Validating Layouts
self._print_layouts_results()
# Validating Feed Job Definition Files
self._print_feed_results()
self._print_errors()
@staticmethod
def status_color(status) -> str:
"""Return the appropriate status color."""
return c.Fore.GREEN if status else c.Fore.RED
@staticmethod
def status_value(status) -> str:
"""Return the appropriate status color."""
return 'passed' if status else 'failed'
|
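As a companion to the check_imported logic in the record above, here is a hedged, standalone sketch of the importlib.util.find_spec availability test with the site-/dist-packages filter; the function name module_available is invented for illustration.
# Standalone sketch of the find_spec-based availability check (names are illustrative).
import importlib.util
import sys

def module_available(module: str) -> bool:
    sys.modules.pop(module, None)  # drop any cached entry so find_spec looks it up fresh
    spec = importlib.util.find_spec(module)
    if spec is None:
        return False
    origin = spec.origin or ''
    # mirror the rule above: modules under site-/dist-packages do not count
    return 'site-packages' not in origin and 'dist-packages' not in origin

print(module_available('json'))  # True: importable from the stdlib
print(module_available('no_such_module_xyz'))  # False: no spec found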
cleaner
|
Cleans out unsafe HTML tags.
Uses bleach and unescape until it reaches a fixed point.
Args:
dummy: unused, sqlalchemy will pass in the model class
value: HTML (string) to be cleaned
Returns:
HTML (string) without unsafe tags.
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Provides an HTML cleaner function with sqalchemy compatible API"""
import re
import HTMLParser
import bleach
# Set up custom tags/attributes for bleach
BLEACH_TAGS = [
'caption', 'strong', 'em', 'b', 'i', 'p', 'code', 'pre', 'tt', 'samp',
'kbd', 'var', 'sub', 'sup', 'dfn', 'cite', 'big', 'small', 'address',
'hr', 'br', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul',
'ol', 'li', 'dl', 'dt', 'dd', 'abbr', 'acronym', 'a', 'img',
'blockquote', 'del', 'ins', 'table', 'tbody', 'tr', 'td', 'th',
] + bleach.ALLOWED_TAGS
BLEACH_ATTRS = {}
ATTRS = [
'href', 'src', 'width', 'height', 'alt', 'cite', 'datetime',
'title', 'class', 'name', 'xml:lang', 'abbr'
]
BUGGY_STRINGS_PATTERN = "&.{2,3};"
for tag in BLEACH_TAGS:
BLEACH_ATTRS[tag] = ATTRS
CLEANER = bleach.sanitizer.Cleaner(
tags=BLEACH_TAGS, attributes=BLEACH_ATTRS, strip=True
)
PARSER = HTMLParser.HTMLParser()
# MASKED: cleaner function (lines 41-85)
|
def cleaner(dummy, value, *_):
"""Cleans out unsafe HTML tags.
Uses bleach and unescape until it reaches a fix point.
Args:
dummy: unused, sqalchemy will pass in the model class
value: html (string) to be cleaned
Returns:
Html (string) without unsafe tags.
"""
if value is None:
# No point in sanitizing None values
return value
if not isinstance(value, basestring):
# No point in sanitizing non-strings
return value
value = unicode(value)
buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, PARSER.unescape(value))
while True:
lastvalue = value
value = PARSER.unescape(CLEANER.clean(value))
if value == lastvalue:
break
# for some reason the clean() function converts strings like "&*!;" to "&*;;".
# if we have such strings we replace the new incorrect values with the old ones
if buggy_strings:
backup_value = value
updated_buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, value)
for match in updated_buggy_strings:
try:
old_value = buggy_strings.next().group()
start, finish = match.span()
value = value[:start] + old_value + value[finish:]
except StopIteration:
# If we have a different number of strings after the clean function
# we should skip replacing
return backup_value
return value
| 41
| 85
|
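The code in this record targets Python 2 (HTMLParser, basestring, unicode). A hedged Python 3 sketch of the same clean/unescape fixed-point idea follows, assuming the bleach package is installed and using html.unescape from the stdlib in place of HTMLParser.
# Python 3 sketch of the fixed-point clean/unescape loop (assumes bleach is installed).
import html
import bleach

def clean_html(value):
    if not isinstance(value, str):
        return value  # nothing to sanitize
    while True:
        last = value
        value = html.unescape(bleach.clean(value, strip=True))
        if value == last:
            return value

# tags outside bleach's default allow-list are stripped
print(clean_html('<b>ok</b><script>alert(1)</script>'))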
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Provides an HTML cleaner function with sqalchemy compatible API"""
import re
import HTMLParser
import bleach
# Set up custom tags/attributes for bleach
BLEACH_TAGS = [
'caption', 'strong', 'em', 'b', 'i', 'p', 'code', 'pre', 'tt', 'samp',
'kbd', 'var', 'sub', 'sup', 'dfn', 'cite', 'big', 'small', 'address',
'hr', 'br', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul',
'ol', 'li', 'dl', 'dt', 'dd', 'abbr', 'acronym', 'a', 'img',
'blockquote', 'del', 'ins', 'table', 'tbody', 'tr', 'td', 'th',
] + bleach.ALLOWED_TAGS
BLEACH_ATTRS = {}
ATTRS = [
'href', 'src', 'width', 'height', 'alt', 'cite', 'datetime',
'title', 'class', 'name', 'xml:lang', 'abbr'
]
BUGGY_STRINGS_PATTERN = "&.{2,3};"
for tag in BLEACH_TAGS:
BLEACH_ATTRS[tag] = ATTRS
CLEANER = bleach.sanitizer.Cleaner(
tags=BLEACH_TAGS, attributes=BLEACH_ATTRS, strip=True
)
PARSER = HTMLParser.HTMLParser()
def cleaner(dummy, value, *_):
"""Cleans out unsafe HTML tags.
Uses bleach and unescape until it reaches a fix point.
Args:
dummy: unused, sqalchemy will pass in the model class
value: html (string) to be cleaned
Returns:
Html (string) without unsafe tags.
"""
if value is None:
# No point in sanitizing None values
return value
if not isinstance(value, basestring):
# No point in sanitizing non-strings
return value
value = unicode(value)
buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, PARSER.unescape(value))
while True:
lastvalue = value
value = PARSER.unescape(CLEANER.clean(value))
if value == lastvalue:
break
# for some reason the clean() function converts strings like "&*!;" to "&*;;".
# if we have such strings we replace the new incorrect values with the old ones
if buggy_strings:
backup_value = value
updated_buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, value)
for match in updated_buggy_strings:
try:
old_value = buggy_strings.next().group()
start, finish = match.span()
value = value[:start] + old_value + value[finish:]
except StopIteration:
# If we have a different number of strings after the clean function
# we should skip replacing
return backup_value
return value
|
__init__
|
The class constructor function.
The only data that we require is the number of points that
make up the mesh.
We optionally take the extrema of the domain and the number of
ghost cells (assumed to be 1).
|
"""
The patch module allows for a grid to be created and for data to be
defined on that grid.
Typical usage:
-- create the grid
grid = Grid1d(nx)
-- create the data that lives on that grid
data = CellCenterData1d(grid)
bcObj = bcObject(xlb="reflect", xrb="reflect"_
data.registerVar("density", bcObj)
...
data.create()
-- initialize some data
dens = data.get_var("density")
dens[:,:] = ...
-- fill the ghost cells
data.fil_lBC("density")
"""
from __future__ import print_function
import sys
import numpy
valid = ["outflow", "periodic",
"reflect", "reflect-even", "reflect-odd",
"dirichlet", "neumann"]
class BCObject(object):
"""
Boundary condition container -- hold the BCs on each boundary
for a single variable
"""
def __init__(self,
xlb="outflow", xrb="outflow",
odd_reflect_dir=""):
# note: "reflect" is ambiguous and will be converted into
# either reflect-even (the default) or reflect-odd
if xlb not in valid or xrb not in valid:
sys.exit("ERROR: invalid BC")
# -x boundary
self.xlb = xlb
if self.xlb == "reflect":
self.xlb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# +x boundary
self.xrb = xrb
if self.xrb == "reflect":
self.xrb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# periodic checks
if ((xlb == "periodic" and xrb != "periodic") or
(xrb == "periodic" and xlb != "periodic")):
sys.exit("ERROR: both xlb and xrb must be periodic")
def __str__(self):
""" print out some basic information about the BC object """
string = "BCs: -x: %s +x: %s " % \
(self.xlb, self.xrb)
return string
class Grid1d(object):
"""
the 1-d grid class. The grid object will contain the coordinate
information (at various centerings).
A basic (1-d) representation of the layout is:
| | | X | | | | X | | |
+--*--+- // -+--*--X--*--+--*--+- // -+--*--+--*--X--*--+- // -+--*--+
0 ng-1 ng ng+1 ... ng+nx-1 ng+nx 2ng+nx-1
ilo ihi
|<- ng ghostcells->|<---- nx interior zones ----->|<- ng ghostcells->|
The '*' marks the data locations.
"""
# MASKED: __init__ function (lines 105-137)
def scratch_array(self):
return numpy.zeros((self.qx), dtype=numpy.float64)
def __str__(self):
""" print out some basic information about the grid object """
return "1-d grid: nx = {}, ng = {}".format(self.nx, self.ng)
class CellCenterData1d(object):
"""
the cell-centered data that lives on a grid.
a CellCenterData1d object is built in a multi-step process before it can
be used. We pass in a grid object to describe where the data
lives:
my_data = patch.CellCenterData1d(myGrid)
register any variables that we expect to live on this patch. Here
bc_object describes the boundary conditions for that variable.
my_data.register_var('density', bc_object)
my_data.register_var('x-momentum', bc_object)
...
finally, finish the initialization of the patch
my_data.create()
This last step actually allocates the storage for the state
variables. Once this is done, the patch is considered to be
locked. New variables cannot be added.
"""
def __init__(self, grid, dtype=numpy.float64):
self.grid = grid
self.dtype = dtype
self.data = None
self.vars = []
self.nvar = 0
self.BCs = {}
# time
self.t = -1
self.initialized = 0
def register_var(self, name, bc_object):
"""
register a variable with CellCenterData1d object. Here we pass in a
BCObject that describes the boundary conditions for that
variable.
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.vars.append(name)
self.nvar += 1
self.BCs[name] = bc_object
def create(self):
"""
called after all the variables are registered and allocates
the storage for the state data
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.data = numpy.zeros((self.nvar, self.grid.qx), dtype=self.dtype)
self.initialized = 1
def __str__(self):
""" print out some basic information about the ccData2d object """
if self.initialized == 0:
mystr = "CellCenterData1d object not yet initialized"
return mystr
mystr = "cc data: nx = {}, ng = {}\n".format(self.grid.nx, self.grid.ng) + \
" nvars = {}\n".format(self.nvar) + \
"variables: \n"
ilo = self.grid.ilo
ihi = self.grid.ihi
for n in range(self.nvar):
mystr += "%16s: min: %15.10f max: %15.10f\n" % \
(self.vars[n],
numpy.min(self.data[n, ilo:ihi+1]),
numpy.max(self.data[n, ilo:ihi+1]))
mystr += "%16s BCs: -x: %-12s +x: %-12s \n" %\
(" ", self.BCs[self.vars[n]].xlb,
self.BCs[self.vars[n]].xrb)
return mystr
def get_var(self, name):
"""
return a data array for the variable described by name. Any changes
made to this are automatically reflected in the CellCenterData1d
object.
"""
n = self.vars.index(name)
return self.data[n, :]
def zero(self, name):
n = self.vars.index(name)
self.data[n, :] = 0.0
def fill_BC_all(self):
"""
fill boundary conditions on all variables
"""
for name in self.vars:
self.fill_BC(name)
def fill_BC(self, name):
"""
fill the boundary conditions. This operates on a single state
variable at a time, to allow for maximum flexibility.
We support periodic, reflect-even, reflect-odd, and outflow BCs.
each variable name has a corresponding bc_object stored in the
CellCenterData1d object -- we refer to this to figure out the action
to take at each boundary.
"""
# there is only a single grid, so every boundary is on
# a physical boundary (except if we are periodic)
# Note: we piggy-back on outflow and reflect-odd for
# Neumann and Dirichlet homogeneous BCs respectively, but
# this only works for a single ghost cell
n = self.vars.index(name)
# -x boundary
if self.BCs[name].xlb == "outflow" or self.BCs[name].xlb == "neumann":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ilo]
elif self.BCs[name].xlb == "reflect-even":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ilo):
self.data[n, i] = -self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb == "periodic":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ihi-self.grid.ng+i+1]
# +x boundary
if self.BCs[name].xrb == "outflow" or self.BCs[name].xrb == "neumann":
for i in range(self.grid.ihi+1, self.grid.nx+2*self.grid.ng):
self.data[n, i] = self.data[n, self.grid.ihi]
elif self.BCs[name].xrb == "reflect-even":
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = self.data[n, i_src]
elif self.BCs[name].xrb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = -self.data[n, i_src]
elif self.BCs[name].xrb == "periodic":
for i in range(self.grid.ihi+1, 2*self.grid.ng + self.grid.nx):
self.data[n, i] = self.data[n, i-self.grid.ihi-1+self.grid.ng]
def restrict(self, varname):
"""
restrict the variable varname to a coarser grid (factor of 2
coarser) and return an array with the resulting data (and same
number of ghostcells)
"""
fG = self.grid
fData = self.get_var(varname)
# allocate an array for the coarsely gridded data
ng_c = fG.ng
nx_c = fG.nx//2
cData = numpy.zeros((2*ng_c+nx_c), dtype=self.dtype)
ilo_c = ng_c
ihi_c = ng_c+nx_c-1
# fill the coarse array with the restricted data -- just
# average the 2 fine cells into the corresponding coarse cell
# that encompasses them.
# This is done by shifting our view into the fData array and
# using a stride of 2 in the indexing.
cData[ilo_c:ihi_c+1] = \
0.5*(fData[fG.ilo :fG.ihi+1:2] + fData[fG.ilo+1:fG.ihi+1:2])
return cData
def prolong(self, varname):
"""
prolong the data in the current (coarse) grid to a finer
(factor of 2 finer) grid. Return an array with the resulting
data (and same number of ghostcells).
We will reconstruct the data in the zone from the
zone-averaged variables using the centered-difference slopes
(x)
f(x,y) = m x/dx + <f>
When averaged over the parent cell, this reproduces <f>.
Each zone's reconstruction will be averaged over 2 children.
| | | | |
| <f> | --> | | |
| | | 1 | 2 |
+-----------+ +-----+-----+
We will fill each of the finer resolution zones by filling all
the 1's together, using a stride 2 into the fine array. Then
the 2's, this allows us to operate in a vector
fashion. All operations will use the same slopes for their
respective parents.
"""
cG = self.grid
cData = self.get_var(varname)
# allocate an array for the finely gridded data
ng_f = cG.ng
nx_f = cG.nx*2
fData = numpy.zeros((2*ng_f+nx_f), dtype=self.dtype)
ilo_f = ng_f
ihi_f = ng_f+nx_f-1
# slopes for the coarse data
m_x = cG.scratch_array()
m_x[cG.ilo:cG.ihi+1] = \
0.5*(cData[cG.ilo+1:cG.ihi+2] - cData[cG.ilo-1:cG.ihi])
# fill the '1' children
fData[ilo_f:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] - 0.25*m_x[cG.ilo:cG.ihi+1]
# fill the '2' children
fData[ilo_f+1:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] + 0.25*m_x[cG.ilo:cG.ihi+1]
return fData
if __name__ == "__main__":
# illustrate basic mesh operations
myg = Grid1d(16, xmax=1.0)
mydata = CellCenterData1d(myg)
bc = BCObject()
mydata.register_var("a", bc)
mydata.create()
a = mydata.get_var("a")
a[:] = numpy.exp(-(myg.x - 0.5)**2/0.1**2)
print(mydata)
|
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
"""
The class constructor function.
The only data that we require is the number of points that
make up the mesh.
We optionally take the extrema of the domain and the number of
ghost cells (assumed to be 1).
"""
# size of grid
self.nx = nx
self.ng = ng
self.qx = 2*ng+nx
# domain extrema
self.xmin = xmin
self.xmax = xmax
# compute the indices of the block interior (excluding guardcells)
self.ilo = ng
self.ihi = ng+nx-1
# define the coordinate information at the left, center, and right
# zone coordinates
self.dx = (xmax - xmin)/nx
self.xl = (numpy.arange(nx+2*ng) - ng)*self.dx + xmin
self.xr = (numpy.arange(nx+2*ng) + 1.0 - ng)*self.dx + xmin
self.x = 0.5*(self.xl + self.xr)
| 105
| 137
|
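A short numpy check of the coordinate arithmetic in the constructor above (nx interior zones, ng ghost cells); the values below are chosen only for illustration.
# Sketch of the Grid1d coordinate construction (illustrative values).
import numpy

nx, ng = 4, 1
xmin, xmax = 0.0, 1.0
dx = (xmax - xmin)/nx
xl = (numpy.arange(nx + 2*ng) - ng)*dx + xmin  # left zone edges
xr = (numpy.arange(nx + 2*ng) + 1.0 - ng)*dx + xmin  # right zone edges
x = 0.5*(xl + xr)  # zone centers
print(x)  # [-0.125  0.125  0.375  0.625  0.875  1.125] -- ghost zones fall outside [0, 1]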
"""
The patch module allows for a grid to be created and for data to be
defined on that grid.
Typical usage:
-- create the grid
grid = Grid1d(nx)
-- create the data that lives on that grid
data = CellCenterData1d(grid)
bcObj = bcObject(xlb="reflect", xrb="reflect"_
data.registerVar("density", bcObj)
...
data.create()
-- initialize some data
dens = data.get_var("density")
dens[:,:] = ...
-- fill the ghost cells
data.fil_lBC("density")
"""
from __future__ import print_function
import sys
import numpy
valid = ["outflow", "periodic",
"reflect", "reflect-even", "reflect-odd",
"dirichlet", "neumann"]
class BCObject(object):
"""
Boundary condition container -- hold the BCs on each boundary
for a single variable
"""
def __init__(self,
xlb="outflow", xrb="outflow",
odd_reflect_dir=""):
# note: "reflect" is ambiguous and will be converted into
# either reflect-even (the default) or reflect-odd
if xlb not in valid or xrb not in valid:
sys.exit("ERROR: invalid BC")
# -x boundary
self.xlb = xlb
if self.xlb == "reflect":
self.xlb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# +x boundary
self.xrb = xrb
if self.xrb == "reflect":
self.xrb = numpy.where(odd_reflect_dir == "x",
"reflect-odd", "reflect-even")
# periodic checks
if ((xlb == "periodic" and xrb != "periodic") or
(xrb == "periodic" and xlb != "periodic")):
sys.exit("ERROR: both xlb and xrb must be periodic")
def __str__(self):
""" print out some basic information about the BC object """
string = "BCs: -x: %s +x: %s " % \
(self.xlb, self.xrb)
return string
class Grid1d(object):
"""
the 1-d grid class. The grid object will contain the coordinate
information (at various centerings).
A basic (1-d) representation of the layout is:
| | | X | | | | X | | |
+--*--+- // -+--*--X--*--+--*--+- // -+--*--+--*--X--*--+- // -+--*--+
0 ng-1 ng ng+1 ... ng+nx-1 ng+nx 2ng+nx-1
ilo ihi
|<- ng ghostcells->|<---- nx interior zones ----->|<- ng ghostcells->|
The '*' marks the data locations.
"""
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
"""
The class constructor function.
The only data that we require is the number of points that
make up the mesh.
We optionally take the extrema of the domain and the number of
ghost cells (assumed to be 1).
"""
# size of grid
self.nx = nx
self.ng = ng
self.qx = 2*ng+nx
# domain extrema
self.xmin = xmin
self.xmax = xmax
# compute the indices of the block interior (excluding guardcells)
self.ilo = ng
self.ihi = ng+nx-1
# define the coordinate information at the left, center, and right
# zone coordinates
self.dx = (xmax - xmin)/nx
self.xl = (numpy.arange(nx+2*ng) - ng)*self.dx + xmin
self.xr = (numpy.arange(nx+2*ng) + 1.0 - ng)*self.dx + xmin
self.x = 0.5*(self.xl + self.xr)
def scratch_array(self):
return numpy.zeros((self.qx), dtype=numpy.float64)
def __str__(self):
""" print out some basic information about the grid object """
return "1-d grid: nx = {}, ng = {}".format(self.nx, self.ng)
class CellCenterData1d(object):
"""
the cell-centered data that lives on a grid.
a CellCenterData1d object is built in a multi-step process before it can
be used. We pass in a grid object to describe where the data
lives:
my_data = patch.CellCenterData1d(myGrid)
register any variables that we expect to live on this patch. Here
bc_object describes the boundary conditions for that variable.
my_data.register_var('density', bc_object)
my_data.register_var('x-momentum', bc_object)
...
finally, finish the initialization of the patch
my_data.create()
This last step actually allocates the storage for the state
variables. Once this is done, the patch is considered to be
locked. New variables cannot be added.
"""
def __init__(self, grid, dtype=numpy.float64):
self.grid = grid
self.dtype = dtype
self.data = None
self.vars = []
self.nvar = 0
self.BCs = {}
# time
self.t = -1
self.initialized = 0
def register_var(self, name, bc_object):
"""
register a variable with CellCenterData1d object. Here we pass in a
BCObject that describes the boundary conditions for that
variable.
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.vars.append(name)
self.nvar += 1
self.BCs[name] = bc_object
def create(self):
"""
called after all the variables are registered and allocates
the storage for the state data
"""
if self.initialized == 1:
sys.exit("ERROR: grid already initialized")
self.data = numpy.zeros((self.nvar, self.grid.qx), dtype=self.dtype)
self.initialized = 1
def __str__(self):
""" print out some basic information about the ccData2d object """
if self.initialized == 0:
mystr = "CellCenterData1d object not yet initialized"
return mystr
mystr = "cc data: nx = {}, ng = {}\n".format(self.grid.nx, self.grid.ng) + \
" nvars = {}\n".format(self.nvar) + \
"variables: \n"
ilo = self.grid.ilo
ihi = self.grid.ihi
for n in range(self.nvar):
mystr += "%16s: min: %15.10f max: %15.10f\n" % \
(self.vars[n],
numpy.min(self.data[n, ilo:ihi+1]),
numpy.max(self.data[n, ilo:ihi+1]))
mystr += "%16s BCs: -x: %-12s +x: %-12s \n" %\
(" ", self.BCs[self.vars[n]].xlb,
self.BCs[self.vars[n]].xrb)
return mystr
def get_var(self, name):
"""
return a data array for the variable described by name. Any changes
made to this are automatically reflected in the CellCenterData1d
object.
"""
n = self.vars.index(name)
return self.data[n, :]
def zero(self, name):
n = self.vars.index(name)
self.data[n, :] = 0.0
def fill_BC_all(self):
"""
fill boundary conditions on all variables
"""
for name in self.vars:
self.fill_BC(name)
def fill_BC(self, name):
"""
fill the boundary conditions. This operates on a single state
variable at a time, to allow for maximum flexibility.
We support periodic, reflect-even, reflect-odd, and outflow BCs.
each variable name has a corresponding bc_object stored in the
CellCenterData1d object -- we refer to this to figure out the action
to take at each boundary.
"""
# there is only a single grid, so every boundary is on
# a physical boundary (except if we are periodic)
# Note: we piggy-back on outflow and reflect-odd for
# Neumann and Dirichlet homogeneous BCs respectively, but
# this only works for a single ghost cell
n = self.vars.index(name)
# -x boundary
if self.BCs[name].xlb == "outflow" or self.BCs[name].xlb == "neumann":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ilo]
elif self.BCs[name].xlb == "reflect-even":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ilo):
self.data[n, i] = -self.data[n, 2*self.grid.ng-i-1]
elif self.BCs[name].xlb == "periodic":
for i in range(0, self.grid.ilo):
self.data[n, i] = self.data[n, self.grid.ihi-self.grid.ng+i+1]
# +x boundary
if self.BCs[name].xrb == "outflow" or self.BCs[name].xrb == "neumann":
for i in range(self.grid.ihi+1, self.grid.nx+2*self.grid.ng):
self.data[n, i] = self.data[n, self.grid.ihi]
elif self.BCs[name].xrb == "reflect-even":
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = self.data[n, i_src]
elif self.BCs[name].xrb in ["reflect-odd", "dirichlet"]:
for i in range(0, self.grid.ng):
i_bnd = self.grid.ihi+1+i
i_src = self.grid.ihi-i
self.data[n, i_bnd] = -self.data[n, i_src]
elif self.BCs[name].xrb == "periodic":
for i in range(self.grid.ihi+1, 2*self.grid.ng + self.grid.nx):
self.data[n, i] = self.data[n, i-self.grid.ihi-1+self.grid.ng]
def restrict(self, varname):
"""
restrict the variable varname to a coarser grid (factor of 2
coarser) and return an array with the resulting data (and same
number of ghostcells)
"""
fG = self.grid
fData = self.get_var(varname)
# allocate an array for the coarsely gridded data
ng_c = fG.ng
nx_c = fG.nx//2
cData = numpy.zeros((2*ng_c+nx_c), dtype=self.dtype)
ilo_c = ng_c
ihi_c = ng_c+nx_c-1
# fill the coarse array with the restricted data -- just
# average the 2 fine cells into the corresponding coarse cell
# that encompasses them.
# This is done by shifting our view into the fData array and
# using a stride of 2 in the indexing.
cData[ilo_c:ihi_c+1] = \
0.5*(fData[fG.ilo :fG.ihi+1:2] + fData[fG.ilo+1:fG.ihi+1:2])
return cData
def prolong(self, varname):
"""
prolong the data in the current (coarse) grid to a finer
(factor of 2 finer) grid. Return an array with the resulting
data (and same number of ghostcells).
We will reconstruct the data in the zone from the
zone-averaged variables using the centered-difference slopes
(x)
f(x,y) = m x/dx + <f>
When averaged over the parent cell, this reproduces <f>.
Each zone's reconstruction will be averaged over 2 children.
| | | | |
| <f> | --> | | |
| | | 1 | 2 |
+-----------+ +-----+-----+
We will fill each of the finer resolution zones by filling all
the 1's together, using a stride 2 into the fine array. Then
the 2's, this allows us to operate in a vector
fashion. All operations will use the same slopes for their
respective parents.
"""
cG = self.grid
cData = self.get_var(varname)
# allocate an array for the finely gridded data
ng_f = cG.ng
nx_f = cG.nx*2
fData = numpy.zeros((2*ng_f+nx_f), dtype=self.dtype)
ilo_f = ng_f
ihi_f = ng_f+nx_f-1
# slopes for the coarse data
m_x = cG.scratch_array()
m_x[cG.ilo:cG.ihi+1] = \
0.5*(cData[cG.ilo+1:cG.ihi+2] - cData[cG.ilo-1:cG.ihi])
# fill the '1' children
fData[ilo_f:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] - 0.25*m_x[cG.ilo:cG.ihi+1]
# fill the '2' children
fData[ilo_f+1:ihi_f+1:2] = \
cData[cG.ilo:cG.ihi+1] + 0.25*m_x[cG.ilo:cG.ihi+1]
return fData
if __name__ == "__main__":
# illustrate basic mesh operations
myg = Grid1d(16, xmax=1.0)
mydata = CellCenterData1d(myg)
bc = BCObject()
mydata.register_var("a", bc)
mydata.create()
a = mydata.get_var("a")
a[:] = numpy.exp(-(myg.x - 0.5)**2/0.1**2)
print(mydata)
|
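A tiny standalone illustration of the stride-2 slicing trick used by restrict above: each pair of fine zones is averaged into one coarse zone (the data values are made up).
# Minimal illustration of the stride-2 averaging used by restrict (made-up data).
import numpy

fine = numpy.array([1.0, 3.0, 5.0, 7.0])  # 4 interior fine zones, no ghost cells here
coarse = 0.5*(fine[0::2] + fine[1::2])  # average each pair of fine zones
print(coarse)  # [2. 6.]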
_flatten_meta
|
Flattens metadata fields in a Sample object. Fields are concatenated into a
single string field to save into an Elasticsearch index
meta - Sample Metadata to be flattened
prefix - (optional) prefix for the metadata values. default=None
|
from src.utils.config import config
import json
# import uuid
import requests
_NAMESPACE = "WS"
_VER_NAMESPACE = "WSVER"
_SAMPLE_NAMESPACE = "SMP"
# versioned and non-versioned index have same version
_SAMPLE_SET_INDEX_VERSION = 1
_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)
_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)
# versioned and non-versioned index have same version
_SAMPLE_INDEX_VERSION = 1
_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)
# _VER_SAMPLE_INDEX_NAME = 'sample_version_' + str(_SAMPLE_INDEX_VERSION)
def _get_sample(sample_info):
""" Get sample from SampleService
sample_info - dict containing 'id' and 'version' of a sample
"""
headers = {"Authorization": config()['ws_token']}
params = {
"id": sample_info['id']
}
if sample_info.get('version'):
params['version'] = sample_info['version']
payload = {
"method": "SampleService.get_sample",
"id": "", # str(uuid.uuid4()),
"params": [params],
"version": "1.1"
}
resp = requests.post(url=config()['sample_service_url'], headers=headers, data=json.dumps(payload))
if not resp.ok:
raise RuntimeError(f"Returned from sample service with status {resp.status_code} - {resp.text}")
resp_json = resp.json()
if resp_json.get('error'):
raise RuntimeError(f"Error from SampleService - {resp_json['error']}")
sample = resp_json['result'][0]
return sample
# MASKED: _flatten_meta function (lines 46-63)
def _combine_meta(meta, flattened_meta, idx):
""" Combine newly flattened metadata with existing metadata. This Function is designed to keep the indexing
of the different metadata fields consistent for each node within the sample node tree s.t. all the
fields in index (idx) 0 will be from item 0 in the node tree. Empty string ("") entries are Empty and
added simply so that the indexing of all fields line up.
meta - existing metadata.
flattened_meta - newly flattened metadata.
idx - current index of ndoe_tree.
"""
for key in flattened_meta:
if key in meta:
meta[key] += ["" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]
else:
meta[key] = ["" for _ in range(idx)] + [flattened_meta[key]]
return meta
def index_sample_set(obj_data, ws_info, obj_data_v1):
"""Indexer for KBaseSets.SampleSet object type"""
info = obj_data['info']
if not obj_data.get('data'):
raise Exception("no data in object")
data = obj_data['data']
workspace_id = info[6]
object_id = info[0]
version = info[4]
sample_set_id = f"{_NAMESPACE}::{workspace_id}:{object_id}"
ver_sample_set_id = f"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}"
sample_set_index = {
"_action": "index",
"doc": {
"description": data["description"],
"sample_ids": [s['id'] for s in data['samples']],
"sample_names": [s['name'] for s in data['samples']],
"sample_versions": [s['version'] for s in data['samples']]
},
"index": _SAMPLE_SET_INDEX_NAME,
"id": sample_set_id
}
yield sample_set_index
ver_sample_set_index = dict(sample_set_index)
ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME
ver_sample_set_index['id'] = ver_sample_set_id
yield ver_sample_set_index
for samp in data["samples"]:
# query the sample service for sample
sample = _get_sample(samp)
sample_id = f"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}"
# not sure how we need to handle more than 1 node.
if len(sample['node_tree']) == 1:
meta_controlled = _flatten_meta(
sample['node_tree'][0]['meta_controlled']
)
meta_user = _flatten_meta(
sample['node_tree'][0]['meta_user']
)
meta_controlled['node_id'] = sample['node_tree'][0]['id']
else:
meta_controlled, meta_user = {}, {}
for idx, node in enumerate(sample['node_tree']):
meta_controlled = _combine_meta(
meta_controlled,
_flatten_meta(
node['meta_controlled']
),
idx
)
meta_user = _combine_meta(
meta_user,
_flatten_meta(
node['meta_user']
),
idx
)
meta_controlled['node_id'] = node['id']
sample_index = {
"_action": "index",
"doc": {
"save_date": sample['save_date'],
"sample_version": sample['version'],
"name": sample['name'],
"parent_id": sample_set_id,
**meta_user,
**meta_controlled
},
"index": _SAMPLE_INDEX_NAME,
"id": sample_id
}
yield sample_index
|
def _flatten_meta(meta, prefix=None):
""" Flattens metadata fields in a Sample object. Fields are concatenated into a
single string field to save into an Elasticsearch index
meta - Sample Metadata to be flattened
prefix - (optional) prefix for the metadata values. default=None
"""
new_meta = {}
for key in meta:
if prefix:
val = prefix + ":"
else:
val = ""
if "value" in meta[key]:
val += str(meta[key]['value'])
if "units" in meta[key]:
val += ";" + str(meta[key]['units'])
new_meta[key] = val
return new_meta
| 46
| 63
|
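A quick self-contained demonstration of the flattening rule implemented above (optional prefix, then "value;units"); the metadata dict is invented for the example.
# Demonstration of the value/units flattening rule (invented metadata).
meta = {'temperature': {'value': 21.5, 'units': 'C'}, 'depth': {'value': 10}}

def flatten(meta, prefix=None):
    new_meta = {}
    for key, field in meta.items():
        val = prefix + ":" if prefix else ""
        if 'value' in field:
            val += str(field['value'])
        if 'units' in field:
            val += ";" + str(field['units'])
        new_meta[key] = val
    return new_meta

print(flatten(meta))  # {'temperature': '21.5;C', 'depth': '10'}
print(flatten(meta, 'node0'))  # {'temperature': 'node0:21.5;C', 'depth': 'node0:10'}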
from src.utils.config import config
import json
# import uuid
import requests
_NAMESPACE = "WS"
_VER_NAMESPACE = "WSVER"
_SAMPLE_NAMESPACE = "SMP"
# versioned and non-versioned index have same version
_SAMPLE_SET_INDEX_VERSION = 1
_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)
_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)
# versioned and non-versioned index have same version
_SAMPLE_INDEX_VERSION = 1
_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)
# _VER_SAMPLE_INDEX_NAME = 'sample_version_' + str(_SAMPLE_INDEX_VERSION)
def _get_sample(sample_info):
""" Get sample from SampleService
sample_info - dict containing 'id' and 'version' of a sample
"""
headers = {"Authorization": config()['ws_token']}
params = {
"id": sample_info['id']
}
if sample_info.get('version'):
params['version'] = sample_info['version']
payload = {
"method": "SampleService.get_sample",
"id": "", # str(uuid.uuid4()),
"params": [params],
"version": "1.1"
}
resp = requests.post(url=config()['sample_service_url'], headers=headers, data=json.dumps(payload))
if not resp.ok:
raise RuntimeError(f"Returned from sample service with status {resp.status_code} - {resp.text}")
resp_json = resp.json()
if resp_json.get('error'):
raise RuntimeError(f"Error from SampleService - {resp_json['error']}")
sample = resp_json['result'][0]
return sample
def _flatten_meta(meta, prefix=None):
""" Flattens metadata fields in a Sample object. Fields are concatenated into a
single string field to save into an Elasticsearch index
meta - Sample Metadata to be flattened
prefix - (optional) prefix for the metadata values. default=None
"""
new_meta = {}
for key in meta:
if prefix:
val = prefix + ":"
else:
val = ""
if "value" in meta[key]:
val += str(meta[key]['value'])
if "units" in meta[key]:
val += ";" + str(meta[key]['units'])
new_meta[key] = val
return new_meta
def _combine_meta(meta, flattened_meta, idx):
""" Combine newly flattened metadata with existing metadata. This Function is designed to keep the indexing
of the different metadata fields consistent for each node within the sample node tree s.t. all the
fields in index (idx) 0 will be from item 0 in the node tree. Empty string ("") entries are Empty and
added simply so that the indexing of all fields line up.
meta - existing metadata.
flattened_meta - newly flattened metadata.
idx - current index of ndoe_tree.
"""
for key in flattened_meta:
if key in meta:
meta[key] += ["" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]
else:
meta[key] = ["" for _ in range(idx)] + [flattened_meta[key]]
return meta
def index_sample_set(obj_data, ws_info, obj_data_v1):
"""Indexer for KBaseSets.SampleSet object type"""
info = obj_data['info']
if not obj_data.get('data'):
raise Exception("no data in object")
data = obj_data['data']
workspace_id = info[6]
object_id = info[0]
version = info[4]
sample_set_id = f"{_NAMESPACE}::{workspace_id}:{object_id}"
ver_sample_set_id = f"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}"
sample_set_index = {
"_action": "index",
"doc": {
"description": data["description"],
"sample_ids": [s['id'] for s in data['samples']],
"sample_names": [s['name'] for s in data['samples']],
"sample_versions": [s['version'] for s in data['samples']]
},
"index": _SAMPLE_SET_INDEX_NAME,
"id": sample_set_id
}
yield sample_set_index
ver_sample_set_index = dict(sample_set_index)
ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME
ver_sample_set_index['id'] = ver_sample_set_id
yield ver_sample_set_index
for samp in data["samples"]:
# query the sample service for sample
sample = _get_sample(samp)
sample_id = f"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}"
        # not sure how we should handle more than one node.
if len(sample['node_tree']) == 1:
meta_controlled = _flatten_meta(
sample['node_tree'][0]['meta_controlled']
)
meta_user = _flatten_meta(
sample['node_tree'][0]['meta_user']
)
meta_controlled['node_id'] = sample['node_tree'][0]['id']
else:
meta_controlled, meta_user = {}, {}
for idx, node in enumerate(sample['node_tree']):
meta_controlled = _combine_meta(
meta_controlled,
_flatten_meta(
node['meta_controlled']
),
idx
)
meta_user = _combine_meta(
meta_user,
_flatten_meta(
node['meta_user']
),
idx
)
meta_controlled['node_id'] = node['id']
sample_index = {
"_action": "index",
"doc": {
"save_date": sample['save_date'],
"sample_version": sample['version'],
"name": sample['name'],
"parent_id": sample_set_id,
**meta_user,
**meta_controlled
},
"index": _SAMPLE_INDEX_NAME,
"id": sample_id
}
yield sample_index
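    # For reference, the ids generated above follow these patterns (values hypothetical):
    #   sample_set_id     -> "WS::123:45"          (_NAMESPACE::workspace_id:object_id)
    #   ver_sample_set_id -> "WSVER::123:45:6"     (_VER_NAMESPACE::workspace_id:object_id:version)
    #   sample_id         -> "SMP::<sample id>:1"  (_SAMPLE_NAMESPACE::sample id:version)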
|
__init__
|
Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
|
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
This is the designed class to define QSA-related
variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
# MASKED: __init__ function (lines 29-45)
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
        # Calculates the number of agents that belong to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
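        # Worked example (hypothetical values): with n_agents=10, t_1=1.0, t_2=2.0, t_3=4.0,
        # the proportions are n_1~0.571, n_2~0.286, n_3~0.143, so q_1=5, q_2=2, q_3=1.
        # Note that int() truncates, so q_1 + q_2 + q_3 may be smaller than n_agents.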
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
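    # In summary, the loop above alternates two update patterns: eq. 4 (case one) pulls the
    # temporary agent toward its queue leader, a' = A + beta*alpha*E*|A - a| + e*(A - a),
    # while eq. 5 (case two) perturbs it in place, a' = a + beta*alpha*E*|A - a|. A successful
    # move keeps the current case for the next agent; a failed move switches it.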
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
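    # In summary: with probability pr[i], an agent is re-routed using either eq. 12/14
    # (a step along A_1 - A_2 when r2 < cv) or eq. 13/15 (a step along A - A_1 otherwise),
    # where the confusion degree cv weighs the best fitness against the next two.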
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
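    # The per-variable update above (eq. 17) acts like a stochastic crossover: each selected
    # coordinate jumps to A_1's value plus an Erlang-weighted difference between A_2 and the
    # agent itself.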
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
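        # Note on the schedule above: beta is ~1 in early iterations (the sqrt factor is near
        # zero) and decays toward 1 / n_iterations by the final iteration, shrinking the
        # fluctuation range as the search converges.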
|
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
        # Overrides its parent class
        super(QSA, self).__init__()
        # Builds the class with the received params
        self.build(params)
        logger.info('Class overridden.')
| 29
| 45
|
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
This is the designed class to define QSA-related
variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
        # Overrides its parent class
        super(QSA, self).__init__()
        # Builds the class with the received params
        self.build(params)
        logger.info('Class overridden.')
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
        # Calculates the number of agents that belong to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
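# A hypothetical end-to-end sketch of running this optimizer (the SearchSpace/Function/
# Opytimizer names are assumptions about the surrounding opytimizer package, not part of
# this file -- check the installed version):
#   from opytimizer import Opytimizer
#   from opytimizer.functions import Function
#   from opytimizer.spaces import SearchSpace
#
#   space = SearchSpace(n_agents=20, n_variables=2, lower_bound=[-10, -10], upper_bound=[10, 10])
#   optimizer = Opytimizer(space, QSA(), Function(lambda x: (x ** 2).sum()))
#   optimizer.start(n_iterations=100)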
|
_calculate_queue
|
Calculates the number of agents that belong to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
|
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
This is the designed class to define QSA-related
variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
        # Overrides its parent class
        super(QSA, self).__init__()
        # Builds the class with the received params
        self.build(params)
        logger.info('Class overridden.')
# MASKED: _calculate_queue function (lines 47-80)
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
|
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
        # Calculates the number of agents that belong to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
| 47
| 80
|
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
This is the designed class to define QSA-related
variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
        # Overrides its parent class
        super(QSA, self).__init__()
        # Builds the class with the received params
        self.build(params)
        logger.info('Class overridden.')
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
        # Calculates the number of agents that belong to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
|
_business_three
|
Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
|
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
This is the designed class to define QSA-related
variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
        # Overrides its parent class
        super(QSA, self).__init__()
        # Builds the class with the received params
        self.build(params)
        logger.info('Class overridden.')
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
        # Calculates the number of agents that belong to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# MASKED: _business_three function (lines 282-325)
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
|
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
| 282
| 325
|
"""Queuing Search Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class QSA(Optimizer):
"""A QSA class, inherited from Optimizer.
This is the designed class to define QSA-related
variables and methods.
References:
J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm
for solving engineering optimization problems.
Applied Mathematical Modelling (2018).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> QSA.')
        # Overrides its parent class
        super(QSA, self).__init__()
        # Builds the class with the received params
        self.build(params)
        logger.info('Class overridden.')
def _calculate_queue(self, n_agents, t_1, t_2, t_3):
"""Calculates the number of agents that belongs to each queue.
Args:
n_agents (int): Number of agents.
t_1 (float): Fitness value of first agent in the population.
t_2 (float): Fitness value of second agent in the population.
t_3 (float): Fitness value of third agent in the population.
Returns:
The number of agents in first, second and third queues.
"""
# Checks if potential service time is bigger than `epsilon`
if t_1 > c.EPSILON:
# Calculates the proportion of agents in first, second and third queues
n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))
# If the potential service time is smaller than `epsilon`
else:
# Each queue will have 1/3 ratio
n_1 = 1 / 3
n_2 = 1 / 3
n_3 = 1 / 3
        # Calculates the number of agents that belong to each queue
q_1 = int(n_1 * n_agents)
q_2 = int(n_2 * n_agents)
q_3 = int(n_3 * n_agents)
return q_1, q_2, q_3
def _business_one(self, agents, function, beta):
"""Performs the first business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
beta (float): Range of fluctuation.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Represents the update patterns by eq. 4 and eq. 5
case = None
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# If it is the first agent in first queue
if i == 0:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# If index is the first agent in second queue
if i == q_1:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# If index is the first agent in third queue
if i == q_1 + q_2:
# Defines the case as one
case = 1
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
alpha = r.generate_uniform_random_number(-1, 1)
# Generates an Erlang distribution
E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))
# If case is defined as one
if case == 1:
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Calculates the fluctuation (eq. 6)
F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \
e * (A.position - a.position)
# Updates the temporary agent's position (eq. 4)
a.position = A.position + F_1
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as one
case = 1
# If new fitness is worse than current agent's fitness
else:
# Defines the case as two
case = 2
# If case is defined as two
else:
# Calculates the fluctuation (eq. 7)
F_2 = beta * alpha * (E * np.fabs(A.position - a.position))
# Updates the temporary agent's position (eq. 5)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If new fitness is better than current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
# Defines the case as two
case = 2
# If new fitness is worse than current agent's fitness
else:
# Defines the case as one
case = 1
def _business_two(self, agents, function):
"""Performs the second business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Copies temporary agents to represent `A_1`, `A_2` and `A_3`
A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])
# Calculates the number of agents in each queue
q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Calculates the confusion degree
cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# If index is smaller than the number of agents in first queue
if i < q_1:
# `A` will receive a copy from `A_1`
A = copy.deepcopy(A_1)
# If index is between first and second queues
elif q_1 <= i < q_1 + q_2:
# `A` will receive a copy from `A_2`
A = copy.deepcopy(A_2)
# If index is between second and third queues
else:
# `A` will receive a copy from `A_3`
A = copy.deepcopy(A_3)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# If random number is smaller than confusion degree
if r2 < cv:
# Calculates the fluctuation (eq. 14)
F_1 = e * (A_1.position - A_2.position)
# Update agent's position (eq. 12)
a.position += F_1
# If random number is bigger than confusion degree
else:
# Calculates the fluctuation (eq. 15)
F_2 = e * (A.position - A_1.position)
# Update agent's position (eq. 13)
a.position += F_2
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def _business_three(self, agents, function):
"""Performs the third business phase.
Args:
agents (list): List of agents.
function (Function): A Function object that will be used as the objective function.
"""
# Sorts agents
agents.sort(key=lambda x: x.fit)
# Calculates the probability of handling the business
pr = [i / len(agents) for i in range(1, len(agents) + 1)]
# Iterates through all agents
for i, agent in enumerate(agents):
# Creates another temporary agent
a = copy.deepcopy(agent)
# Iterates through all decision variables
for j in range(agent.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of handling the business
if r1 < pr[i]:
# Randomly selects two individuals
A_1, A_2 = np.random.choice(agents, 2, replace=False)
# Generates an Erlang number
e = r.generate_gamma_random_number(1, 0.5, 1)
# Updates temporary agent's position (eq. 17)
a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])
# Evaluates the agent
a.fit = function(a.position)
# If the new fitness is better than the current agent's fitness
if a.fit < agent.fit:
# Replaces the current agent's position and fitness
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
def update(self, space, function, iteration, n_iterations):
"""Wraps Queue Search Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
n_iterations (int): Maximum number of iterations.
"""
# Calculates the range of fluctuation.
beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))
# Performs the first business phase
self._business_one(space.agents, function, beta)
# Performs the second business phase
self._business_two(space.agents, function)
# Performs the third business phase
self._business_three(space.agents, function)
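# Illustrative sketch (not part of the original module): how the range of
# fluctuation `beta` computed in `update` decays over the run. `EPSILON` is an
# assumed stand-in for `c.EPSILON`; `np` is the module's numpy import.
if __name__ == '__main__':
    EPSILON = 1e-32
    n_iterations = 10
    for iteration in range(n_iterations):
        beta = np.exp(np.log(1 / (iteration + EPSILON)) * np.sqrt(iteration / n_iterations))
        print(f'iteration={iteration}: beta={beta:.3f}')  # starts near 1, decays towards 0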
|
_parse_actions
|
Actions come in as a combined list. This method separates the webhook actions into a
separate collection and combines any number of email actions into a single email collection
and a single value for `email_service_owners`. If any email action contains a True value
for `send_to_service_owners` then it is assumed the entire value should be True.
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.command_modules.monitor.util import get_operator_map, get_aggregation_map
from knack.log import get_logger
logger = get_logger(__name__)
def create_metric_alert(client, resource_group_name, rule_name, scopes, condition, disabled=False, description=None,
tags=None, actions=None, severity=2, window_size='5m', evaluation_frequency='1m',
auto_mitigate=None):
from azure.mgmt.monitor.models import (MetricAlertResource,
MetricAlertSingleResourceMultipleMetricCriteria,
MetricAlertMultipleResourceMultipleMetricCriteria)
# generate names for the conditions
for i, cond in enumerate(condition):
cond.name = 'cond{}'.format(i)
criteria = None
target_resource_type = None
target_resource_region = None
if len(scopes) == 1:
criteria = MetricAlertSingleResourceMultipleMetricCriteria(all_of=condition)
else:
criteria = MetricAlertMultipleResourceMultipleMetricCriteria(all_of=condition)
target_resource_type = _parse_resource_type(scopes)
target_resource_region = 'global'
kwargs = {
'description': description,
'severity': severity,
'enabled': not disabled,
'scopes': scopes,
'evaluation_frequency': evaluation_frequency,
'window_size': window_size,
'criteria': criteria,
'target_resource_type': target_resource_type,
'target_resource_region': target_resource_region,
'actions': actions,
'tags': tags,
'location': 'global',
'auto_mitigate': auto_mitigate
}
return client.create_or_update(resource_group_name, rule_name, MetricAlertResource(**kwargs))
def update_metric_alert(instance, scopes=None, description=None, enabled=None, tags=None,
severity=None, window_size=None, evaluation_frequency=None, auto_mitigate=None,
add_actions=None, remove_actions=None, add_conditions=None, remove_conditions=None):
if scopes is not None:
instance.scopes = scopes
if description is not None:
instance.description = description
if enabled is not None:
instance.enabled = enabled
if tags is not None:
instance.tags = tags
if severity is not None:
instance.severity = severity
if window_size is not None:
instance.window_size = window_size
if evaluation_frequency is not None:
instance.evaluation_frequency = evaluation_frequency
if auto_mitigate is not None:
instance.auto_mitigate = auto_mitigate
# process action removals
if remove_actions is not None:
instance.actions = [x for x in instance.actions if x.action_group_id.lower() not in remove_actions]
# process action additions
if add_actions is not None:
for action in add_actions:
match = next(
(x for x in instance.actions if action.action_group_id.lower() == x.action_group_id.lower()), None
)
if match:
match.webhook_properties = action.webhook_properties
else:
instance.actions.append(action)
# process condition removals
if remove_conditions is not None:
instance.criteria.all_of = [x for x in instance.criteria.all_of if x.name not in remove_conditions]
def _get_next_name():
i = 0
while True:
possible_name = 'cond{}'.format(i)
match = next((x for x in instance.criteria.all_of if x.name == possible_name), None)
if match:
i = i + 1
continue
return possible_name
# process condition additions
if add_conditions is not None:
for condition in add_conditions:
condition.name = _get_next_name()
instance.criteria.all_of.append(condition)
return instance
def list_metric_alerts(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_metric_rule(client, resource_group_name, rule_name, target, condition, description=None, disabled=False,
location=None, tags=None, email_service_owners=False, actions=None):
from azure.mgmt.monitor.models import AlertRuleResource, RuleEmailAction
condition.data_source.resource_uri = target
custom_emails, webhooks, _ = _parse_actions(actions)
actions = [
RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=custom_emails)
] + (webhooks or [])
rule = AlertRuleResource(
location=location, alert_rule_resource_name=rule_name, is_enabled=not disabled,
condition=condition, tags=tags, description=description, actions=actions)
return client.create_or_update(resource_group_name, rule_name, rule)
def update_metric_rule(instance, target=None, condition=None, description=None, enabled=None, metric=None,
operator=None, threshold=None, aggregation=None, period=None, tags=None,
email_service_owners=None, add_actions=None, remove_actions=None):
# Update general properties
if description is not None:
instance.description = description
if enabled is not None:
instance.is_enabled = enabled
if tags is not None:
instance.tags = tags
# Update conditions
if condition is not None:
target = target or instance.condition.data_source.resource_uri
instance.condition = condition
if metric is not None:
instance.condition.data_source.metric_name = metric
if operator is not None:
instance.condition.operator = get_operator_map()[operator]
if threshold is not None:
instance.condition.threshold = threshold
if aggregation is not None:
instance.condition.time_aggregation = get_aggregation_map()[aggregation]
if period is not None:
instance.condition.window_size = period
if target is not None:
instance.condition.data_source.resource_uri = target
# Update actions
emails, webhooks, curr_email_service_owners = _parse_actions(instance.actions)
# process removals
if remove_actions is not None:
removed_emails, removed_webhooks = _parse_action_removals(remove_actions)
emails = [x for x in emails if x not in removed_emails]
webhooks = [x for x in webhooks if x.service_uri not in removed_webhooks]
# process additions
if add_actions is not None:
added_emails, added_webhooks, _ = _parse_actions(add_actions)
emails = list(set(emails) | set(added_emails))
webhooks = webhooks + added_webhooks
# Replace the existing actions array. This potentially restructures rules that were created
# via other methods (Portal, ARM template). However, the functionality of these rules should
# be the same.
from azure.mgmt.monitor.models import RuleEmailAction
if email_service_owners is None:
email_service_owners = curr_email_service_owners
actions = [RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=emails)] + webhooks
instance.actions = actions
return instance
# MASKED: _parse_actions function (lines 184-199)
def _parse_action_removals(actions):
""" Separates the combined list of keys to remove into webhooks and emails. """
flattened = list({x for sublist in actions for x in sublist})
emails = []
webhooks = []
for item in flattened:
if item.startswith('http://') or item.startswith('https://'):
webhooks.append(item)
else:
emails.append(item)
return emails, webhooks
def _parse_resource_type(scopes):
from msrestazure.tools import parse_resource_id
from azure.cli.core import CLIError
namespace = None
resource_type = None
for item in scopes:
item_namespace = parse_resource_id(item)['namespace']
item_resource_type = parse_resource_id(item)['resource_type']
if namespace is None and resource_type is None:
namespace = item_namespace
resource_type = item_resource_type
else:
if namespace != item_namespace or resource_type != item_resource_type:
                raise CLIError('All scopes must be of the same resource type.')
return namespace + '/' + resource_type
|
def _parse_actions(actions):
""" Actions come in as a combined list. This method separates the webhook actions into a
separate collection and combines any number of email actions into a single email collection
and a single value for `email_service_owners`. If any email action contains a True value
for `send_to_service_owners` then it is assumed the entire value should be True. """
from azure.mgmt.monitor.models import RuleEmailAction, RuleWebhookAction
actions = actions or []
email_service_owners = None
webhooks = [x for x in actions if isinstance(x, RuleWebhookAction)]
custom_emails = set()
for action in actions:
if isinstance(action, RuleEmailAction):
if action.send_to_service_owners:
email_service_owners = True
custom_emails = custom_emails | set(action.custom_emails)
return list(custom_emails), webhooks, email_service_owners
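# Hypothetical usage sketch (assumes the legacy azure-mgmt-monitor models are
# installed): merging two email actions and a webhook with `_parse_actions`.
if __name__ == '__main__':
    from azure.mgmt.monitor.models import RuleEmailAction, RuleWebhookAction
    sample = [
        RuleEmailAction(send_to_service_owners=False, custom_emails=['ops@contoso.com']),
        RuleEmailAction(send_to_service_owners=True, custom_emails=['oncall@contoso.com']),
        RuleWebhookAction(service_uri='https://example.com/hook'),
    ]
    emails, hooks, notify_owners = _parse_actions(sample)
    # Both addresses survive (deduplicated via a set, so order is unspecified),
    # the webhook stays in its own list, and notify_owners is True because one
    # email action asked for service owners.
    print(sorted(emails), len(hooks), notify_owners)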
| 184
| 199
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.command_modules.monitor.util import get_operator_map, get_aggregation_map
from knack.log import get_logger
logger = get_logger(__name__)
def create_metric_alert(client, resource_group_name, rule_name, scopes, condition, disabled=False, description=None,
tags=None, actions=None, severity=2, window_size='5m', evaluation_frequency='1m',
auto_mitigate=None):
from azure.mgmt.monitor.models import (MetricAlertResource,
MetricAlertSingleResourceMultipleMetricCriteria,
MetricAlertMultipleResourceMultipleMetricCriteria)
# generate names for the conditions
for i, cond in enumerate(condition):
cond.name = 'cond{}'.format(i)
criteria = None
target_resource_type = None
target_resource_region = None
if len(scopes) == 1:
criteria = MetricAlertSingleResourceMultipleMetricCriteria(all_of=condition)
else:
criteria = MetricAlertMultipleResourceMultipleMetricCriteria(all_of=condition)
target_resource_type = _parse_resource_type(scopes)
target_resource_region = 'global'
kwargs = {
'description': description,
'severity': severity,
'enabled': not disabled,
'scopes': scopes,
'evaluation_frequency': evaluation_frequency,
'window_size': window_size,
'criteria': criteria,
'target_resource_type': target_resource_type,
'target_resource_region': target_resource_region,
'actions': actions,
'tags': tags,
'location': 'global',
'auto_mitigate': auto_mitigate
}
return client.create_or_update(resource_group_name, rule_name, MetricAlertResource(**kwargs))
def update_metric_alert(instance, scopes=None, description=None, enabled=None, tags=None,
severity=None, window_size=None, evaluation_frequency=None, auto_mitigate=None,
add_actions=None, remove_actions=None, add_conditions=None, remove_conditions=None):
if scopes is not None:
instance.scopes = scopes
if description is not None:
instance.description = description
if enabled is not None:
instance.enabled = enabled
if tags is not None:
instance.tags = tags
if severity is not None:
instance.severity = severity
if window_size is not None:
instance.window_size = window_size
if evaluation_frequency is not None:
instance.evaluation_frequency = evaluation_frequency
if auto_mitigate is not None:
instance.auto_mitigate = auto_mitigate
# process action removals
if remove_actions is not None:
instance.actions = [x for x in instance.actions if x.action_group_id.lower() not in remove_actions]
# process action additions
if add_actions is not None:
for action in add_actions:
match = next(
(x for x in instance.actions if action.action_group_id.lower() == x.action_group_id.lower()), None
)
if match:
match.webhook_properties = action.webhook_properties
else:
instance.actions.append(action)
# process condition removals
if remove_conditions is not None:
instance.criteria.all_of = [x for x in instance.criteria.all_of if x.name not in remove_conditions]
def _get_next_name():
i = 0
while True:
possible_name = 'cond{}'.format(i)
match = next((x for x in instance.criteria.all_of if x.name == possible_name), None)
if match:
i = i + 1
continue
return possible_name
# process condition additions
if add_conditions is not None:
for condition in add_conditions:
condition.name = _get_next_name()
instance.criteria.all_of.append(condition)
return instance
def list_metric_alerts(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_metric_rule(client, resource_group_name, rule_name, target, condition, description=None, disabled=False,
location=None, tags=None, email_service_owners=False, actions=None):
from azure.mgmt.monitor.models import AlertRuleResource, RuleEmailAction
condition.data_source.resource_uri = target
custom_emails, webhooks, _ = _parse_actions(actions)
actions = [
RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=custom_emails)
] + (webhooks or [])
rule = AlertRuleResource(
location=location, alert_rule_resource_name=rule_name, is_enabled=not disabled,
condition=condition, tags=tags, description=description, actions=actions)
return client.create_or_update(resource_group_name, rule_name, rule)
def update_metric_rule(instance, target=None, condition=None, description=None, enabled=None, metric=None,
operator=None, threshold=None, aggregation=None, period=None, tags=None,
email_service_owners=None, add_actions=None, remove_actions=None):
# Update general properties
if description is not None:
instance.description = description
if enabled is not None:
instance.is_enabled = enabled
if tags is not None:
instance.tags = tags
# Update conditions
if condition is not None:
target = target or instance.condition.data_source.resource_uri
instance.condition = condition
if metric is not None:
instance.condition.data_source.metric_name = metric
if operator is not None:
instance.condition.operator = get_operator_map()[operator]
if threshold is not None:
instance.condition.threshold = threshold
if aggregation is not None:
instance.condition.time_aggregation = get_aggregation_map()[aggregation]
if period is not None:
instance.condition.window_size = period
if target is not None:
instance.condition.data_source.resource_uri = target
# Update actions
emails, webhooks, curr_email_service_owners = _parse_actions(instance.actions)
# process removals
if remove_actions is not None:
removed_emails, removed_webhooks = _parse_action_removals(remove_actions)
emails = [x for x in emails if x not in removed_emails]
webhooks = [x for x in webhooks if x.service_uri not in removed_webhooks]
# process additions
if add_actions is not None:
added_emails, added_webhooks, _ = _parse_actions(add_actions)
emails = list(set(emails) | set(added_emails))
webhooks = webhooks + added_webhooks
# Replace the existing actions array. This potentially restructures rules that were created
# via other methods (Portal, ARM template). However, the functionality of these rules should
# be the same.
from azure.mgmt.monitor.models import RuleEmailAction
if email_service_owners is None:
email_service_owners = curr_email_service_owners
actions = [RuleEmailAction(send_to_service_owners=email_service_owners, custom_emails=emails)] + webhooks
instance.actions = actions
return instance
def _parse_actions(actions):
""" Actions come in as a combined list. This method separates the webhook actions into a
separate collection and combines any number of email actions into a single email collection
and a single value for `email_service_owners`. If any email action contains a True value
for `send_to_service_owners` then it is assumed the entire value should be True. """
from azure.mgmt.monitor.models import RuleEmailAction, RuleWebhookAction
actions = actions or []
email_service_owners = None
webhooks = [x for x in actions if isinstance(x, RuleWebhookAction)]
custom_emails = set()
for action in actions:
if isinstance(action, RuleEmailAction):
if action.send_to_service_owners:
email_service_owners = True
custom_emails = custom_emails | set(action.custom_emails)
return list(custom_emails), webhooks, email_service_owners
def _parse_action_removals(actions):
""" Separates the combined list of keys to remove into webhooks and emails. """
flattened = list({x for sublist in actions for x in sublist})
emails = []
webhooks = []
for item in flattened:
if item.startswith('http://') or item.startswith('https://'):
webhooks.append(item)
else:
emails.append(item)
return emails, webhooks
def _parse_resource_type(scopes):
from msrestazure.tools import parse_resource_id
from azure.cli.core import CLIError
namespace = None
resource_type = None
for item in scopes:
item_namespace = parse_resource_id(item)['namespace']
item_resource_type = parse_resource_id(item)['resource_type']
if namespace is None and resource_type is None:
namespace = item_namespace
resource_type = item_resource_type
else:
if namespace != item_namespace or resource_type != item_resource_type:
                raise CLIError('All scopes must be of the same resource type.')
return namespace + '/' + resource_type
|
_request_locks
|
Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
Number of locks in `locks` to request.
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
|
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict
from collections.abc import Hashable
from dask.utils import parse_timedelta
from distributed.client import Client
from distributed.utils import TimeoutError, log_errors
from distributed.worker import get_worker
logger = logging.getLogger(__name__)
class MultiLockExtension:
"""An extension for the scheduler to manage MultiLocks
This adds the following routes to the scheduler
* multi_lock_acquire
* multi_lock_release
The approach is to maintain `self.locks` that maps a lock (unique name given to
`MultiLock(names=, ...)` at creation) to a list of users (instances of `MultiLock`)
that "requests" the lock. Additionally, `self.requests` maps a user to its requested
locks and `self.requests_left` maps a user to the number of locks still need.
Every time a user `x` gets to the front in `self.locks[name] = [x, ...]` it means
that `x` now holds the lock `name` and when it holds all the requested locks
`acquire()` can return.
Finally, `self.events` contains all the events users are waiting on to finish.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list) # lock -> users
self.requests = {} # user -> locks
self.requests_left = {} # user -> locks still needed
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
# MASKED: _request_locks function (lines 49-80)
def _refain_locks(self, locks, id):
"""Cancel/release previously requested/acquired locks
Parameters
----------
locks: List[str]
            Names of the locks to release.
        id: Hashable
            Identifier of the `MultiLock` instance releasing the locks.
"""
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
# Notice, `self.requests_left[new_first]` might go below zero
# if more locks are freed than requested.
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
with log_errors():
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
# At this point `id` acquired all `locks`
assert self.requests_left[id] == 0
return True
def release(self, id=None):
with log_errors():
self._refain_locks(self.requests[id], id)
class MultiLock:
"""Distributed Centralized Lock
Parameters
----------
names: List[str]
Names of the locks to acquire. Choosing the same name allows two
disconnected processes to coordinate a lock.
client: Client (optional)
Client to use for communication with the scheduler. If not given, the
default global client will be used.
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout=1) # doctest: +SKIP
>>> # do things with protected resource 'x' and 'y'
>>> lock.release() # doctest: +SKIP
"""
def __init__(self, names=[], client=None):
try:
self.client = client or Client.current()
except ValueError:
            # Fall back to the client of the current worker
self.client = get_worker().client
self.names = names
self.id = uuid.uuid4().hex
self._locked = False
def acquire(self, blocking=True, timeout=None, num_locks=None):
"""Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
include local coroutine time, network transfer time, etc..
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
        True or False, indicating whether the lock was successfully acquired
"""
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
        self._locked = bool(result)  # only mark as locked when the acquire succeeded
return result
def release(self):
"""Release the lock if already acquired"""
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args, **kwargs):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, *args, **kwargs):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
|
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
"""Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
            Number of locks in `locks` to request.
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
"""
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1: # The lock was free
self.requests_left[id] -= 1
if self.requests_left[id] == 0: # Got all locks needed
                    # Since we got all the locks we need, we can remove the rest of the requests
self.requests[id] -= set(locks[i + 1 :])
return True
return False
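# Toy, scheduler-free illustration of the queueing idea behind `_request_locks`:
# an id joins each lock's waiter list, and a lock only counts as acquired while
# the id sits at the front of that list.
if __name__ == '__main__':
    from collections import defaultdict
    queues = defaultdict(list)   # lock name -> waiting ids; the front holds it
    queues['x'].append('other')  # 'x' is already held by someone else

    def _toy_request(names, id, num_locks):
        left = num_locks
        for name in sorted(names, key=lambda n: len(queues[n])):
            queues[name].append(id)
            if len(queues[name]) == 1:  # the lock was free
                left -= 1
                if left == 0:
                    return True
        return False

    print(_toy_request(['x', 'y'], 'me', 1))  # True: 'y' alone satisfies num_locks=1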
| 49
| 80
|
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict
from collections.abc import Hashable
from dask.utils import parse_timedelta
from distributed.client import Client
from distributed.utils import TimeoutError, log_errors
from distributed.worker import get_worker
logger = logging.getLogger(__name__)
class MultiLockExtension:
"""An extension for the scheduler to manage MultiLocks
This adds the following routes to the scheduler
* multi_lock_acquire
* multi_lock_release
The approach is to maintain `self.locks` that maps a lock (unique name given to
`MultiLock(names=, ...)` at creation) to a list of users (instances of `MultiLock`)
that "requests" the lock. Additionally, `self.requests` maps a user to its requested
locks and `self.requests_left` maps a user to the number of locks still need.
Every time a user `x` gets to the front in `self.locks[name] = [x, ...]` it means
that `x` now holds the lock `name` and when it holds all the requested locks
`acquire()` can return.
Finally, `self.events` contains all the events users are waiting on to finish.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list) # lock -> users
self.requests = {} # user -> locks
self.requests_left = {} # user -> locks still needed
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
"""Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
            Number of locks in `locks` to request.
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
"""
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1: # The lock was free
self.requests_left[id] -= 1
if self.requests_left[id] == 0: # Got all locks needed
                    # Since we got all the locks we need, we can remove the rest of the requests
self.requests[id] -= set(locks[i + 1 :])
return True
return False
def _refain_locks(self, locks, id):
"""Cancel/release previously requested/acquired locks
Parameters
----------
locks: List[str]
            Names of the locks to release.
        id: Hashable
            Identifier of the `MultiLock` instance releasing the locks.
"""
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
# Notice, `self.requests_left[new_first]` might go below zero
# if more locks are freed than requested.
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
with log_errors():
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
# At this point `id` acquired all `locks`
assert self.requests_left[id] == 0
return True
def release(self, id=None):
with log_errors():
self._refain_locks(self.requests[id], id)
class MultiLock:
"""Distributed Centralized Lock
Parameters
----------
names: List[str]
Names of the locks to acquire. Choosing the same name allows two
disconnected processes to coordinate a lock.
client: Client (optional)
Client to use for communication with the scheduler. If not given, the
default global client will be used.
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout=1) # doctest: +SKIP
>>> # do things with protected resource 'x' and 'y'
>>> lock.release() # doctest: +SKIP
"""
def __init__(self, names=[], client=None):
try:
self.client = client or Client.current()
except ValueError:
            # Fall back to the client of the current worker
self.client = get_worker().client
self.names = names
self.id = uuid.uuid4().hex
self._locked = False
def acquire(self, blocking=True, timeout=None, num_locks=None):
"""Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
include local coroutine time, network transfer time, etc..
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
        True or False, indicating whether the lock was successfully acquired
"""
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
        self._locked = bool(result)  # only mark as locked when the acquire succeeded
return result
def release(self):
"""Release the lock if already acquired"""
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args, **kwargs):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, *args, **kwargs):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
|
acquire
|
Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
include local coroutine time, network transfer time, etc..
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
True or False, indicating whether the lock was successfully acquired
|
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict
from collections.abc import Hashable
from dask.utils import parse_timedelta
from distributed.client import Client
from distributed.utils import TimeoutError, log_errors
from distributed.worker import get_worker
logger = logging.getLogger(__name__)
class MultiLockExtension:
"""An extension for the scheduler to manage MultiLocks
This adds the following routes to the scheduler
* multi_lock_acquire
* multi_lock_release
The approach is to maintain `self.locks` that maps a lock (unique name given to
`MultiLock(names=, ...)` at creation) to a list of users (instances of `MultiLock`)
that "requests" the lock. Additionally, `self.requests` maps a user to its requested
locks and `self.requests_left` maps a user to the number of locks still need.
Every time a user `x` gets to the front in `self.locks[name] = [x, ...]` it means
that `x` now holds the lock `name` and when it holds all the requested locks
`acquire()` can return.
Finally, `self.events` contains all the events users are waiting on to finish.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list) # lock -> users
self.requests = {} # user -> locks
self.requests_left = {} # user -> locks still needed
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
"""Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
            Number of locks in `locks` to request.
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
"""
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1: # The lock was free
self.requests_left[id] -= 1
if self.requests_left[id] == 0: # Got all locks needed
                    # Since we got all the locks we need, we can remove the rest of the requests
self.requests[id] -= set(locks[i + 1 :])
return True
return False
def _refain_locks(self, locks, id):
"""Cancel/release previously requested/acquired locks
Parameters
----------
locks: List[str]
            Names of the locks to release.
        id: Hashable
            Identifier of the `MultiLock` instance releasing the locks.
"""
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
# Notice, `self.requests_left[new_first]` might go below zero
# if more locks are freed than requested.
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
with log_errors():
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
# At this point `id` acquired all `locks`
assert self.requests_left[id] == 0
return True
def release(self, id=None):
with log_errors():
self._refain_locks(self.requests[id], id)
class MultiLock:
"""Distributed Centralized Lock
Parameters
----------
names: List[str]
Names of the locks to acquire. Choosing the same name allows two
disconnected processes to coordinate a lock.
client: Client (optional)
Client to use for communication with the scheduler. If not given, the
default global client will be used.
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout=1) # doctest: +SKIP
>>> # do things with protected resource 'x' and 'y'
>>> lock.release() # doctest: +SKIP
"""
def __init__(self, names=[], client=None):
try:
self.client = client or Client.current()
except ValueError:
            # Fall back to the client of the current worker
self.client = get_worker().client
self.names = names
self.id = uuid.uuid4().hex
self._locked = False
# MASKED: acquire function (lines 169-209)
def release(self):
"""Release the lock if already acquired"""
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args, **kwargs):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, *args, **kwargs):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
|
def acquire(self, blocking=True, timeout=None, num_locks=None):
"""Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
include local coroutine time, network transfer time, etc..
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
        True or False, indicating whether the lock was successfully acquired
"""
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
        self._locked = bool(result)  # only mark as locked when the acquire succeeded
return result
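# Hedged usage sketch (assumes a running dask.distributed cluster and that
# MultiLock is importable from distributed.multi_lock): a non-blocking acquire
# returns immediately and must not be combined with a timeout.
if __name__ == '__main__':
    from distributed import Client
    from distributed.multi_lock import MultiLock
    client = Client()                 # assumed local test cluster
    lock = MultiLock(['x', 'y'])
    if lock.acquire(blocking=False):  # True only if both locks are free right now
        try:
            pass                      # work with resources 'x' and 'y'
        finally:
            lock.release()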
| 169
| 209
|
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict
from collections.abc import Hashable
from dask.utils import parse_timedelta
from distributed.client import Client
from distributed.utils import TimeoutError, log_errors
from distributed.worker import get_worker
logger = logging.getLogger(__name__)
class MultiLockExtension:
"""An extension for the scheduler to manage MultiLocks
This adds the following routes to the scheduler
* multi_lock_acquire
* multi_lock_release
The approach is to maintain `self.locks` that maps a lock (unique name given to
`MultiLock(names=, ...)` at creation) to a list of users (instances of `MultiLock`)
that "requests" the lock. Additionally, `self.requests` maps a user to its requested
locks and `self.requests_left` maps a user to the number of locks still need.
Every time a user `x` gets to the front in `self.locks[name] = [x, ...]` it means
that `x` now holds the lock `name` and when it holds all the requested locks
`acquire()` can return.
Finally, `self.events` contains all the events users are waiting on to finish.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list) # lock -> users
self.requests = {} # user -> locks
self.requests_left = {} # user -> locks still needed
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
"""Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
            Number of locks in `locks` to request.
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
"""
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1: # The lock was free
self.requests_left[id] -= 1
if self.requests_left[id] == 0: # Got all locks needed
                    # Since we got all the locks we need, we can remove the rest of the requests
self.requests[id] -= set(locks[i + 1 :])
return True
return False
def _refain_locks(self, locks, id):
"""Cancel/release previously requested/acquired locks
Parameters
----------
locks: List[str]
Names of the locks to refain.
id: Hashable
Identifier of the `MultiLock` instance refraining the locks.
"""
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
# Notice, `self.requests_left[new_first]` might go below zero
# if more locks are freed than requested.
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
with log_errors():
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
# At this point `id` acquired all `locks`
assert self.requests_left[id] == 0
return True
def release(self, id=None):
with log_errors():
self._refain_locks(self.requests[id], id)
class MultiLock:
"""Distributed Centralized Lock
Parameters
----------
names: List[str]
Names of the locks to acquire. Choosing the same name allows two
disconnected processes to coordinate a lock.
client: Client (optional)
Client to use for communication with the scheduler. If not given, the
default global client will be used.
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout=1) # doctest: +SKIP
>>> # do things with protected resource 'x' and 'y'
>>> lock.release() # doctest: +SKIP
"""
def __init__(self, names=[], client=None):
try:
self.client = client or Client.current()
except ValueError:
            # Fall back to the client of the current worker
self.client = get_worker().client
self.names = names
self.id = uuid.uuid4().hex
self._locked = False
def acquire(self, blocking=True, timeout=None, num_locks=None):
"""Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
include local coroutine time, network transfer time, etc..
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
        True or False, indicating whether the lock was successfully acquired
"""
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
        self._locked = bool(result)  # only mark as locked when the acquire succeeded
return result
def release(self):
"""Release the lock if already acquired"""
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args, **kwargs):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, *args, **kwargs):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
|
Args
|
Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
|
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""service-management operations describe command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.endpoints import common_flags
_ERROR = ('The `service-management operations describe` command has been '
'replaced by `endpoints operations describe` and '
'`services operations describe`.')
@base.Deprecate(is_removed=True, error=_ERROR)
class Describe(base.DescribeCommand):
"""Describes an operation resource for a given operation name."""
# MASKED: Args function (lines 35-55)
def Run(self, args):
"""Stubs 'service-management operations describe'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
"""
pass
|
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
common_flags.operation_flag(suffix='to describe').AddToParser(parser)
parser.display_info.AddFormat(
':(metadata.startTime.date(format="%Y-%m-%d %H:%M:%S %Z", tz=LOCAL)) '
'[transforms] default')
parser.add_argument(
'--full',
action='store_true',
default=False,
help=('Print the entire operation resource, which could be large. '
'By default, a summary will be printed instead.'))
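# Rough stand-alone analogue of what `Args` registers, using plain argparse
# rather than calliope; the positional OPERATION argument is an assumption
# about what `common_flags.operation_flag` adds.
if __name__ == '__main__':
    import argparse
    demo_parser = argparse.ArgumentParser(prog='operations describe')
    demo_parser.add_argument('operation', help='The operation to describe.')
    demo_parser.add_argument('--full', action='store_true', default=False,
                             help='Print the entire operation resource, which '
                                  'could be large. By default, a summary will '
                                  'be printed instead.')
    print(demo_parser.parse_args(['operation-12345', '--full']))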
| 35
| 55
|
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""service-management operations describe command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.endpoints import common_flags
_ERROR = ('The `service-management operations describe` command has been '
'replaced by `endpoints operations describe` and '
'`services operations describe`.')
@base.Deprecate(is_removed=True, error=_ERROR)
class Describe(base.DescribeCommand):
"""Describes an operation resource for a given operation name."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
common_flags.operation_flag(suffix='to describe').AddToParser(parser)
parser.display_info.AddFormat(
':(metadata.startTime.date(format="%Y-%m-%d %H:%M:%S %Z", tz=LOCAL)) '
'[transforms] default')
parser.add_argument(
'--full',
action='store_true',
default=False,
help=('Print the entire operation resource, which could be large. '
'By default, a summary will be printed instead.'))
def Run(self, args):
"""Stubs 'service-management operations describe'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
"""
pass
|
get
|
Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MachineLearningCompute']
class MachineLearningCompute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Machine Learning compute object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_name: Name of the Azure Machine Learning compute.
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]] properties: Compute properties
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['compute_name'] = compute_name
__props__['identity'] = identity
__props__['location'] = location
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sku'] = sku
__props__['tags'] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MachineLearningCompute, __self__).__init__(
'azure-native:machinelearningservices/v20210101:MachineLearningCompute',
resource_name,
__props__,
opts)
# MASKED: get function (lines 86-110)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
Compute properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MachineLearningCompute':
"""
Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["identity"] = None
__props__["location"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["sku"] = None
__props__["system_data"] = None
__props__["tags"] = None
__props__["type"] = None
return MachineLearningCompute(resource_name, opts=opts, __props__=__props__)
| 86
| 110
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MachineLearningCompute']
class MachineLearningCompute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Machine Learning compute object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_name: Name of the Azure Machine Learning compute.
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]] properties: Compute properties
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['compute_name'] = compute_name
__props__['identity'] = identity
__props__['location'] = location
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sku'] = sku
__props__['tags'] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MachineLearningCompute, __self__).__init__(
'azure-native:machinelearningservices/v20210101:MachineLearningCompute',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MachineLearningCompute':
"""
Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["identity"] = None
__props__["location"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["sku"] = None
__props__["system_data"] = None
__props__["tags"] = None
__props__["type"] = None
return MachineLearningCompute(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
Compute properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
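# --- Editor's usage sketch for the generated resource above (not part of the
# generated file). The import path follows the package layout implied by this
# module, and every name, location, SKU value, and resource ID below is a
# hypothetical placeholder.
import pulumi
from pulumi_azure_native.machinelearningservices.v20210101 import MachineLearningCompute

# Provision an AmlCompute cluster in an assumed, pre-existing workspace.
# The raw-dict shape for `properties` mirrors AmlComputeArgs (an assumption).
compute = MachineLearningCompute(
    "example-compute",
    resource_group_name="example-rg",
    workspace_name="example-workspace",
    compute_name="cpu-cluster",
    location="eastus",
    properties={
        "computeType": "AmlCompute",
        "properties": {
            "vmSize": "STANDARD_DS3_V2",
            "scaleSettings": {"minNodeCount": 0, "maxNodeCount": 4},
        },
    },
)

# Or adopt an already-provisioned compute via the static get() defined above;
# the ARM ID is a placeholder.
existing = MachineLearningCompute.get(
    "existing-compute",
    id="/subscriptions/<sub-id>/resourceGroups/example-rg/providers/"
       "Microsoft.MachineLearningServices/workspaces/example-workspace/"
       "computes/cpu-cluster",
)
pulumi.export("compute_name", existing.name)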
|
sample_recognize
|
Transcribe a short audio file with multiple channels
Args:
local_file_path Path to local audio file, e.g. /path/audio.wav
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_transcribe_multichannel")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Multi-Channel Audio Transcription (Local File)
# description: Transcribe a short audio file with multiple channels
# usage: python3 samples/v1/speech_transcribe_multichannel.py [--local_file_path "resources/multi.wav"]
# [START speech_transcribe_multichannel]
from google.cloud import speech_v1
import io
# MASKED: sample_recognize function (lines 32-69)
# [END speech_transcribe_multichannel]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--local_file_path", type=str, default="resources/multi.wav")
args = parser.parse_args()
sample_recognize(args.local_file_path)
if __name__ == "__main__":
main()
|
def sample_recognize(local_file_path):
"""
Transcribe a short audio file with multiple channels
Args:
local_file_path Path to local audio file, e.g. /path/audio.wav
"""
client = speech_v1.SpeechClient()
# local_file_path = 'resources/multi.wav'
# The number of channels in the input audio file (optional)
audio_channel_count = 2
# When set to true, each audio channel will be recognized separately.
# The recognition result will contain a channel_tag field to state which
# channel that result belongs to
enable_separate_recognition_per_channel = True
# The language of the supplied audio
language_code = "en-US"
config = {
"audio_channel_count": audio_channel_count,
"enable_separate_recognition_per_channel": enable_separate_recognition_per_channel,
"language_code": language_code,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
# channel_tag to recognize which audio channel this result is for
print(u"Channel tag: {}".format(result.channel_tag))
# First alternative is the most probable result
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
| 32
| 69
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_transcribe_multichannel")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Multi-Channel Audio Transcription (Local File)
# description: Transcribe a short audio file with multiple channels
# usage: python3 samples/v1/speech_transcribe_multichannel.py [--local_file_path "resources/multi.wav"]
# [START speech_transcribe_multichannel]
from google.cloud import speech_v1
import io
def sample_recognize(local_file_path):
"""
Transcribe a short audio file with multiple channels
Args:
local_file_path Path to local audio file, e.g. /path/audio.wav
"""
client = speech_v1.SpeechClient()
# local_file_path = 'resources/multi.wav'
# The number of channels in the input audio file (optional)
audio_channel_count = 2
# When set to true, each audio channel will be recognized separately.
# The recognition result will contain a channel_tag field to state which
# channel that result belongs to
enable_separate_recognition_per_channel = True
# The language of the supplied audio
language_code = "en-US"
config = {
"audio_channel_count": audio_channel_count,
"enable_separate_recognition_per_channel": enable_separate_recognition_per_channel,
"language_code": language_code,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
# channel_tag to recognize which audio channel this result is for
print(u"Channel tag: {}".format(result.channel_tag))
# First alternative is the most probable result
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
# [END speech_transcribe_multichannel]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--local_file_path", type=str, default="resources/multi.wav")
args = parser.parse_args()
sample_recognize(args.local_file_path)
if __name__ == "__main__":
main()
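# --- Editor's note: the sample above targets google-cloud-speech 1.x, where
# recognize() still accepted positional config/audio dicts. For 2.x and later
# the call takes keyword arguments and typed request objects; a hedged sketch
# of the equivalent request under that newer API (an assumption about the
# installed library version):
from google.cloud import speech

def sample_recognize_v2(local_file_path):
    # Same multichannel request, expressed with the 2.x typed objects.
    client = speech.SpeechClient()
    with open(local_file_path, "rb") as f:
        content = f.read()
    config = speech.RecognitionConfig(
        audio_channel_count=2,
        enable_separate_recognition_per_channel=True,
        language_code="en-US",
    )
    audio = speech.RecognitionAudio(content=content)
    response = client.recognize(config=config, audio=audio)
    for result in response.results:
        print("Channel tag:", result.channel_tag)
        print("Transcript:", result.alternatives[0].transcript)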
|
_find_all_hints_in_graph_def
|
Look at the graph in the given session and return a dict of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
a dict of `LiteFuncCall` objects, keyed by function uuid.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
input = custom.add_inputs(input)
output = tf.sigmoid(input) * input
custom.add_outputs(output)
return output
image = tf.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.Session()
graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
[image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
conceptual unit.
Properties:
self.inputs: inputs to the op (hash from index # to argument)
self.outputs: outputs to the op (hash from index # to argument)
self.function_name: the tflite custom op name to use
self.uuid: a unique call id for this particular call (i.e.
multiple function calls would have the same function_name but different
      uuids).
self.params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
# MASKED: _find_all_hints_in_graph_def function (lines 220-256)
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
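# --- Editor's sketch (TF 1.x / tf.contrib era, the same API the module
# docstring uses): the identity wrappers produced by add_inputs()/add_outputs()
# carry the OpHint attr constants in their NodeDef, which is what the graph
# scan below looks for. Inspecting them directly:
import tensorflow as tf

hint = tf.contrib.lite.OpHint("cool_activation")
x = tf.placeholder(tf.float32, [1])
wrapped, = hint.add_inputs(x)  # add_inputs returns a list; unpack it

node = wrapped.op.node_def
print(node.attr["_tflite_function_name"].s)         # b'cool_activation'
print(len(node.attr["_tflite_function_uuid"].s))    # 32 (hex uuid)
print(node.attr["_tflite_function_input_index"].i)  # 0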
|
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict of `LiteFuncCall` objects, keyed by function uuid.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
      attr = operand.op.node_def.attr
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        # Not a hint-wrapped tensor; skip it. Check membership before reading
        # so the protobuf map is not mutated with a default entry.
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
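# --- Editor's illustration: the grouping above leans on
# collections.defaultdict, where the first lookup of a new uuid materializes
# an empty _LiteFuncCall that later tensors fill in. The same pattern with
# hypothetical records (no TensorFlow required):
import collections

class Call(object):
    def __init__(self):
        self.inputs = {}
        self.outputs = {}

records = [
    ("uuid-a", "input", 0, "x:0"),
    ("uuid-a", "output", 0, "y:0"),
    ("uuid-b", "input", 0, "z:0"),
]

calls = collections.defaultdict(Call)
for uuid, kind, index, tensor_name in records:
    call = calls[uuid]  # first access for a uuid creates a fresh Call
    if kind == "input":
        call.inputs[index] = tensor_name
    else:
        call.outputs[index] = tensor_name

assert sorted(calls) == ["uuid-a", "uuid-b"]
assert calls["uuid-a"].outputs == {0: "y:0"}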
| 220
| 256
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
input = custom.add_inputs(input)
output = tf.sigmoid(input) * input
custom.add_outputs(output)
return output
image = tf.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.Session()
graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
[image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
conceptual unit.
Properties:
self.inputs: inputs to the op (hash from index # to argument)
self.outputs: outputs to the op (hash from index # to argument)
self.function_name: the tflite custom op name to use
self.uuid: a unique call id for this particular call (i.e.
multiple function calls would have the same function_name but different
      uuids).
self.params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict of `LiteFuncCall` objects, keyed by function uuid.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
      attr = operand.op.node_def.attr
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        # Not a hint-wrapped tensor; skip it. Check membership before reading
        # so the protobuf map is not mutated with a default entry.
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
|
convert_op_hints_to_stubs
|
Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
input = custom.add_inputs(input)
output = tf.sigmoid(input) * input
custom.add_outputs(output)
return output
image = tf.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.Session()
graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
[image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
conceptual unit.
Properties:
self.inputs: inputs to the op (hash from index # to argument)
self.outputs: outputs to the op (hash from index # to argument)
self.function_name: the tflite custom op name to use
self.uuid: a unique call id for this particular call (i.e.
multiple function calls would have the same function_name but different
      uuids).
self.params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict of `LiteFuncCall` objects, keyed by function uuid.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
      attr = operand.op.node_def.attr
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        # Not a hint-wrapped tensor; skip it. Check membership before reading
        # so the protobuf map is not mutated with a default entry.
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
# MASKED: convert_op_hints_to_stubs function (lines 273-304)
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
|
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
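# --- Editor's end-to-end sketch: putting the pieces together, the intended
# flow mirrors the module docstring's example: build the hinted graph, stub
# it, then convert. TF 1.x / tf.contrib-era entry points, taken as
# assumptions from that docstring.
import tensorflow as tf

def tflite_cool_activation(x):
    custom = tf.contrib.lite.OpHint("cool_activation")
    x, = custom.add_inputs(x)   # add_inputs returns a list; unpack it
    y = tf.sigmoid(x) * x
    y, = custom.add_outputs(y)  # so does add_outputs
    return y

image = tf.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))

with tf.Session() as session:
    # Replace each hinted subgraph with a single stub op, then hand the
    # stubbed GraphDef to toco for conversion.
    stubbed_graph_def = tf.contrib.lite.convert_op_hints_to_stubs(session)
    tflite_graph = tf.contrib.lite.toco_convert(
        stubbed_graph_def, [image], [output])

with open("/tmp/graph.fb", "wb") as fp:
    fp.write(tflite_graph)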
| 273
| 304
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
input = custom.add_inputs(input)
output = tf.sigmoid(input) * input
custom.add_outputs(output)
return output
image = tf.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.Session()
graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
[image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
conceptual unit.
Properties:
self.inputs: inputs to the op (hash from index # to argument)
self.outputs: outputs to the op (hash from index # to argument)
self.function_name: the tflite custom op name to use
self.uuid: a unique call id for this particular call (i.e.
multiple function calls would have the same function_name but different
      uuids).
self.params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict of `LiteFuncCall` objects, keyed by function uuid.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
      # Test membership before subscripting: indexing a proto map creates a
      # default entry as a side effect.
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
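# A minimal sketch of consuming the map returned by
# _find_all_hints_in_graph_def before stubbing; it assumes a graph built with
# OpHint as in the module docstring example.
import tensorflow as tf
with tf.Session() as session:
  x = tf.placeholder(tf.float32, (1, 4))
  hint = OpHint("cool_activation")
  x_wrapped, = hint.add_inputs(x)
  y, = hint.add_outputs(tf.sigmoid(x_wrapped) * x_wrapped)
  hints = _find_all_hints_in_graph_def(session)
  for call_uuid, call in hints.items():
    print(call)                                    # uses _LiteFuncCall.__str__
    print(sorted(call.inputs), sorted(call.outputs))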
|
__init__
|
Create an OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
    input, = custom.add_inputs(input)
    output = tf.sigmoid(input) * input
    output, = custom.add_outputs(output)
    return output
  image = tf.placeholder(tf.float32, (1, 16, 16, 1))
  output = tf.identity(tflite_cool_activation(image))
  session = tf.Session()
  graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
  tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
                                              [image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities with stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
# MASKED: __init__ function (lines 106-118)
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():  # Python 3: items()
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
  conceptual unit.
  Properties:
    self.inputs: inputs to the op (dict from index # to argument)
    self.outputs: outputs to the op (dict from index # to argument)
    self.function_name: the tflite custom op name to use
    self.uuid: a unique call id for this particular call (i.e.
      multiple function calls would have the same function_name but different
      uuids).
    self.params: a dict from param name to value for op constant data, e.g.
      the axis on a reduction or the strides on a convolution.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict mapping each function invocation uuid to its `_LiteFuncCall` object.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
      # Test membership before subscripting: indexing a proto map creates a
      # default entry as a side effect.
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
|
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
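# A minimal sketch of where the constructor's kwargs end up; "stride" is a
# hypothetical attribute name, and OpHint/tf refer to the module above.
import tensorflow as tf
hint = OpHint("my_custom_pool", stride=2)
# Nothing is written to the graph yet; the kwargs wait in
# hint._attrs_to_store_later until the first add_outputs() call.
out, = hint.add_outputs(tf.constant([1.0]))
# The first wrapped output now carries a "_tflite_attr_stride" tensor
# attribute (stored via _setattr).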
| 106
| 118
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
    input, = custom.add_inputs(input)
    output = tf.sigmoid(input) * input
    output, = custom.add_outputs(output)
    return output
  image = tf.placeholder(tf.float32, (1, 16, 16, 1))
  output = tf.identity(tflite_cool_activation(image))
  session = tf.Session()
  graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
  tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
                                              [image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities with stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():  # Python 3: items()
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
  conceptual unit.
  Properties:
    self.inputs: inputs to the op (dict from index # to argument)
    self.outputs: outputs to the op (dict from index # to argument)
    self.function_name: the tflite custom op name to use
    self.uuid: a unique call id for this particular call (i.e.
      multiple function calls would have the same function_name but different
      uuids).
    self.params: a dict from param name to value for op constant data, e.g.
      the axis on a reduction or the strides on a convolution.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict mapping each function invocation uuid to its `_LiteFuncCall` object.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
      # Test membership before subscripting: indexing a proto map creates a
      # default entry as a side effect.
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
|
add_outputs
|
Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
    input, = custom.add_inputs(input)
    output = tf.sigmoid(input) * input
    output, = custom.add_outputs(output)
    return output
  image = tf.placeholder(tf.float32, (1, 16, 16, 1))
  output = tf.identity(tflite_cool_activation(image))
  session = tf.Session()
  graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
  tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
                                              [image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
# MASKED: add_outputs function (lines 155-188)
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
  conceptual unit.
  Properties:
    self.inputs: inputs to the op (dict from index # to argument)
    self.outputs: outputs to the op (dict from index # to argument)
    self.function_name: the tflite custom op name to use
    self.uuid: a unique call id for this particular call (i.e.
      multiple function calls would have the same function_name but different
      uuids).
    self.params: a dict from param name to value for op constant data, e.g.
      the axis on a reduction or the strides on a convolution.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict mapping each function invocation uuid to its `_LiteFuncCall` object.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
      # Test membership before subscripting: indexing a proto map creates a
      # default entry as a side effect.
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
|
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():  # Python 3: items()
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
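# A hedged usage sketch: wrap a value and inspect the metadata the identity
# stand-in carries (attribute names are the constants defined on OpHint).
import tensorflow as tf
hint = OpHint("cool_activation")
x = tf.constant([1.0, 2.0])
y, = hint.add_outputs(tf.sigmoid(x) * x)         # add_outputs returns a list
node_attr = y.op.node_def.attr
print(node_attr[OpHint.FUNCTION_NAME_ATTR].s)           # b'cool_activation'
print(node_attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i)   # 0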
| 155
| 188
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
    input, = custom.add_inputs(input)
    output = tf.sigmoid(input) * input
    output, = custom.add_outputs(output)
    return output
  image = tf.placeholder(tf.float32, (1, 16, 16, 1))
  output = tf.identity(tflite_cool_activation(image))
  session = tf.Session()
  graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
  tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
                                              [image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input, output are identities that have parameters representing
what argument they are, what the name of the function they should turn into
in tf lite as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
      *args: List of inputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():  # Python 3: items()
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate found hints in the graphdef into a single
  conceptual unit.
  Properties:
    self.inputs: inputs to the op (dict from index # to argument)
    self.outputs: outputs to the op (dict from index # to argument)
    self.function_name: the tflite custom op name to use
    self.uuid: a unique call id for this particular call (i.e.
      multiple function calls would have the same function_name but different
      uuids).
    self.params: a dict from param name to value for op constant data, e.g.
      the axis on a reduction or the strides on a convolution.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
    a dict mapping each function invocation uuid to its `_LiteFuncCall` object.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
      # Test membership before subscripting: indexing a proto map creates a
      # default entry as a side effect.
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
      call_def = func_calls[uuid]
      call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
|
extract
|
Merges bioindex.tsv with the infile (balanced data),
finds the volsplit.zip location for each bio file and
extracts the files into secure_volume/holding_folder.
|
#!/usr/bin/python3
import sys
import os
import shutil
import csv
import zipfile
import pandas as pd
import glob
infile = sys.argv[1]
outfile = sys.argv[2]
# remove holding_folder if it exists, and create new folder
# use 'rm -r holding_folder/*' in shell script instead?
holding_path = '/media/secure_volume/holding_folder'
if os.path.isdir(holding_path):
shutil.rmtree(holding_path)
os.mkdir(holding_path)
# MASKED: extract function (lines 21-41)
def slicer(outfile):
idx_file_path = '/media/secure_volume/index/bioindex.tsv'
holding_folder_path = '/media/secure_volume/holding_folder/'
bio_idx_df = pd.read_table(idx_file_path)
bio_idx_df.set_index('mainid', inplace = True)
mainid_list = [vol for vol in os.listdir(holding_folder_path) if vol.endswith('.zip')]
# remove '.zip' from file names
mainid_list_clean = [item[0:-4] for item in mainid_list]
#subset bioindex on holding_folder IDs
htid_series = bio_idx_df.htid[mainid_list_clean]
file_path_list = glob.glob(holding_folder_path+'*.zip')
# print('file path list has: ',len(file_path_list))
# print('htid_list has', len(htid_list))
slice_df = pd.DataFrame(htid_series)
slice_df['path'] = file_path_list
slice_df['c'] = 0
slice_df['d'] = 1001
    with open(outfile, 'w') as outf:
        slice_df.to_csv(outf, sep='\t', header=False, index=False)
print("Wrote", len(slice_df), "rows to", outfile)
extract(infile)
slicer(outfile)
|
def extract(infile):
'''
Merges bioindex.tsv with the infile (balanced data),
finds the volsplit.zip location for each bio file and
extracts the files into secure_volume/holding_folder.
'''
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.read_table(infile)
for suffix in balanced_bioindex.filesuffix.unique():
volsplit_file = 'volsplit'+str(suffix)+'.zip'
volsplit_df = balanced_bioindex.loc[balanced_bioindex.filesuffix == suffix,:]
        filename = None  # so the except clause can't hit a NameError
        try:
            with zipfile.ZipFile('/media/secure_volume/'+volsplit_file, 'r') as myzip:
                for idx, row in volsplit_df.iterrows():
                    filename = row['mainid']+'.zip'
                    myzip.extract(filename, '/media/secure_volume/holding_folder')
        except Exception as e:
            print('ERROR:', filename, 'not found in', volsplit_file, '!', e)
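# A self-contained sketch of the per-suffix extraction step above; the
# 'mainid'/'filesuffix' column names come from extract(), while the two-row
# frame and its archive members are made-up illustrations.
import pandas as pd
import zipfile
df = pd.DataFrame({'mainid': ['vol001', 'vol002'], 'filesuffix': [1, 1]})
for suffix in df.filesuffix.unique():
    archive = '/media/secure_volume/volsplit{}.zip'.format(suffix)
    members = [m + '.zip' for m in df.loc[df.filesuffix == suffix, 'mainid']]
    with zipfile.ZipFile(archive) as z:
        for member in members:
            z.extract(member, '/media/secure_volume/holding_folder')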
| 21
| 41
|
#!/usr/bin/python3
import sys
import os
import shutil
import csv
import zipfile
import pandas as pd
import glob
infile = sys.argv[1]
outfile = sys.argv[2]
# remove holding_folder if it exists, and create new folder
# use 'rm -r holding_folder/*' in shell script instead?
holding_path = '/media/secure_volume/holding_folder'
if os.path.isdir(holding_path):
shutil.rmtree(holding_path)
os.mkdir(holding_path)
def extract(infile):
'''
Merges bioindex.tsv with the infile (balanced data),
finds the volsplit.zip location for each bio file and
extracts the files into secure_volume/holding_folder.
'''
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.read_table(infile)
for suffix in balanced_bioindex.filesuffix.unique():
volsplit_file = 'volsplit'+str(suffix)+'.zip'
volsplit_df = balanced_bioindex.loc[balanced_bioindex.filesuffix == suffix,:]
        filename = None  # so the except clause can't hit a NameError
        try:
            with zipfile.ZipFile('/media/secure_volume/'+volsplit_file, 'r') as myzip:
                for idx, row in volsplit_df.iterrows():
                    filename = row['mainid']+'.zip'
                    myzip.extract(filename, '/media/secure_volume/holding_folder')
        except Exception as e:
            print('ERROR:', filename, 'not found in', volsplit_file, '!', e)
def slicer(outfile):
idx_file_path = '/media/secure_volume/index/bioindex.tsv'
holding_folder_path = '/media/secure_volume/holding_folder/'
bio_idx_df = pd.read_table(idx_file_path)
bio_idx_df.set_index('mainid', inplace = True)
mainid_list = [vol for vol in os.listdir(holding_folder_path) if vol.endswith('.zip')]
# remove '.zip' from file names
mainid_list_clean = [item[0:-4] for item in mainid_list]
#subset bioindex on holding_folder IDs
htid_series = bio_idx_df.htid[mainid_list_clean]
file_path_list = glob.glob(holding_folder_path+'*.zip')
# print('file path list has: ',len(file_path_list))
# print('htid_list has', len(htid_list))
slice_df = pd.DataFrame(htid_series)
slice_df['path'] = file_path_list
slice_df['c'] = 0
slice_df['d'] = 1001
    with open(outfile, 'w') as outf:
        slice_df.to_csv(outf, sep='\t', header=False, index=False)
print("Wrote", len(slice_df), "rows to", outfile)
extract(infile)
slicer(outfile)
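# Hedged usage note -- the script reads two positional arguments; the file
# name below is hypothetical:
#   python3 extract_and_slice.py balanced_data.tsv slices.tsv
# infile feeds extract() and must carry 'mainid' and 'filesuffix' columns;
# outfile receives slicer()'s four tab-separated columns
# (htid, zip path, 0, 1001).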
|
__init__
|
Initialize the connection pool.
New 'minconn' connections are created immediately by calling psycopg2.connect
with the given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
|
"""Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
# MASKED: __init__ function (lines 38-58)
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
"""Close all connections.
    Note that this can make some code fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
except:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module to determine thread ids, so we
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
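# A hedged sketch of sharing the threaded variant across workers; the
# connection parameters are placeholders and require a reachable database.
import threading
from psycopg2.pool import ThreadedConnectionPool

tpool = ThreadedConnectionPool(1, 4, dbname="mydb", user="me")

def worker():
    conn = tpool.getconn()          # lock-guarded checkout
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT pg_backend_pid()")
    finally:
        tpool.putconn(conn)         # lock-guarded return

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
tpool.closeall()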
|
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
        New 'minconn' connections are created immediately by calling psycopg2.connect
        with the given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
| 38
| 58
|
"""Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
        New 'minconn' connections are created immediately, calling
        psycopg2.connect() with the given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
"""Close all connections.
        Note that this can cause some code to fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
            except Exception:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module to determine thread ids, so we
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
|
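A minimal usage sketch of the pool implemented above; the DSN string and the query are placeholders, not part of the source:
# Hedged example: borrow a connection, use it, hand it back, shut down.
from psycopg2 import pool

pg_pool = pool.SimpleConnectionPool(1, 5, "dbname=test user=postgres")  # minconn=1, maxconn=5
conn = pg_pool.getconn()  # checkout; a key is generated automatically
try:
    with conn.cursor() as cur:
        cur.execute("SELECT 1")
        print(cur.fetchone())
finally:
    pg_pool.putconn(conn)  # return the connection so it can be reused
pg_pool.closeall()  # close every pooled connection once done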
_closeall
|
Close all connections.
Note that this can cause some code to fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
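For illustration, a hedged sketch of the defensive handling this warning asks for; `pg_pool` is assumed to be an already constructed pool instance:
# Hypothetical guard: another component may have called closeall() already.
import psycopg2
from psycopg2 import pool

try:
    conn = pg_pool.getconn()
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT now()")
    finally:
        pg_pool.putconn(conn)
except pool.PoolError:
    pass  # getconn()/putconn() raise PoolError once the pool is closed
except psycopg2.InterfaceError:
    pass  # the checked-out connection itself was already closed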
|
"""Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
        New 'minconn' connections are created immediately, calling
        psycopg2.connect() with the given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
# MASKED: _closeall function (lines 125-138)
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module to determine thread ids, so we
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
|
def _closeall(self):
"""Close all connections.
        Note that this can cause some code to fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
            except Exception:
pass
self.closed = True
| 125
| 138
|
"""Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
        New 'minconn' connections are created immediately, calling
        psycopg2.connect() with the given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
"""Close all connections.
        Note that this can cause some code to fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
            except Exception:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module to determine thread ids, so we
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
|
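ThreadedConnectionPool above simply serializes every pool operation behind one shared lock; a hedged sketch of concurrent use (placeholder DSN, trivial worker):
# Hedged example: four workers sharing one ThreadedConnectionPool.
import threading
from psycopg2 import pool

pg_pool = pool.ThreadedConnectionPool(2, 10, "dbname=test user=postgres")

def worker():
    conn = pg_pool.getconn()  # checkout is serialized by the pool's lock
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT pg_backend_pid()")
            print(cur.fetchone()[0])
    finally:
        pg_pool.putconn(conn)

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
pg_pool.closeall()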
stock_zh_a_spot
|
Fetch real-time quotes for all A shares from Sina Finance; calling this function repeatedly will get the caller's IP temporarily banned by Sina
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: pandas.DataFrame
symbol    code  name   trade pricechange changepercent     buy \
0     sh600000  600000  浦发银行  12.920      -0.030        -0.232  12.920
1 sh600004 600004 白云机场 18.110 -0.370 -2.002 18.110
2 sh600006 600006 东风汽车 4.410 -0.030 -0.676 4.410
3 sh600007 600007 中国国贸 17.240 -0.360 -2.045 17.240
4 sh600008 600008 首创股份 3.320 -0.030 -0.896 3.310
... ... ... ... ... ... ...
3755 sh600096 600096 云天化 5.270 -0.220 -4.007 5.270
3756 sh600097 600097 开创国际 10.180 -0.120 -1.165 10.180
3757 sh600098 600098 广州发展 6.550 -0.040 -0.607 6.540
3758 sh600099 600099 林海股份 6.540 -0.150 -2.242 6.540
3759 sh600100 600100 同方股份 8.200 -0.100 -1.205 8.200
sell  settlement    open    high     low    volume     amount \
0     12.930      12.950  12.950  13.100  12.860  46023920  597016896
1 18.120 18.480 18.510 18.510 17.880 24175071 437419344
2 4.420 4.440 4.490 4.490 4.410 4304900 19130233
3 17.280 17.600 17.670 17.670 17.220 684801 11879731
4 3.320 3.350 3.360 3.360 3.300 8284294 27579688
... ... ... ... ... ... ...
3755 5.280 5.490 5.490 5.500 5.220 16964636 90595172
3756 10.190 10.300 10.220 10.340 10.090 1001676 10231669
3757 6.550 6.590 6.560 6.620 6.500 1996449 13098901
3758 6.580 6.690 6.650 6.680 6.530 1866180 12314997
3759 8.210 8.300 8.300 8.310 8.120 12087236 99281447
ticktime per pb mktcap nmc turnoverratio
0 15:00:00 6.984 0.790 3.792289e+07 3.631006e+07 0.16376
1 15:00:07 32.927 2.365 3.747539e+06 3.747539e+06 1.16826
2 15:00:02 15.926 1.207 8.820000e+05 8.820000e+05 0.21525
3 15:00:02 22.390 2.367 1.736555e+06 1.736555e+06 0.06798
4 15:00:07 22.912 1.730 1.887569e+06 1.600444e+06 0.17185
... ... ... ... ... ...
3755 15:00:00 56.728 1.566 7.523847e+05 6.963668e+05 1.28386
3756 15:00:00 17.552 1.434 2.452734e+05 2.303459e+05 0.44268
3757 15:00:00 25.476 1.059 1.785659e+06 1.785659e+06 0.07323
3758 15:00:00 540.496 3.023 1.433045e+05 1.433045e+05 0.85167
3759 15:00:07 -6.264 1.465 2.430397e+06 2.430397e+06 0.40782
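Given the ban warning above, a hedged throttling wrapper may help; the 60-second minimum interval is an arbitrary assumption, not a documented Sina limit:
# Hypothetical throttle around stock_zh_a_spot().
import time

_last_spot_call = 0.0

def stock_zh_a_spot_throttled(min_interval: float = 60.0):
    global _last_spot_call
    wait = min_interval - (time.time() - _last_spot_call)
    if wait > 0:
        time.sleep(wait)  # space out requests between successive calls
    _last_spot_call = time.time()
    return stock_zh_a_spot()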
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2019/10/30 11:28
Desc: Sina Finance - A shares - real-time and historical quotes (including the qfq and hfq adjustment factors)
"""
import re
import demjson
import execjs
import pandas as pd
import requests
from tqdm import tqdm
from akshare.stock.cons import (zh_sina_a_stock_payload,
zh_sina_a_stock_url,
zh_sina_a_stock_count_url,
zh_sina_a_stock_hist_url,
hk_js_decode,
zh_sina_a_stock_hfq_url,
zh_sina_a_stock_qfq_url,
zh_sina_a_stock_amount_url)
def _get_zh_a_page_count() -> int:
"""
    Total number of pages listing all A-share stocks
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
    :return: total number of pages to fetch
:rtype: int
"""
res = requests.get(zh_sina_a_stock_count_url)
    stock_count = int(re.findall(re.compile(r"\d+"), res.text)[0])
    # 80 stocks per page; int() / 80 always yields a float in Python 3, so the
    # old isinstance(page_count, int) branch could never fire -- round up instead
    if stock_count % 80 == 0:
        return stock_count // 80
    else:
        return stock_count // 80 + 1
# MASKED: stock_zh_a_spot function (lines 40-94)
def stock_zh_a_daily(symbol: str = "sz000613", adjust: str = "qfq") -> pd.DataFrame:
"""
    Sina Finance - A shares - historical quotes for an individual stock; heavy scraping easily gets the caller's IP banned
:param symbol: sh600000
:type symbol: str
    :param adjust: empty string: unadjusted data; qfq: forward-adjusted data; hfq: backward-adjusted data; hfq-factor: backward-adjustment factors; qfq-factor: forward-adjustment factors
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = execjs.compile(hk_js_decode)
dict_list = js_code.call(
'd', res.text.split("=")[1].split(";")[0].replace(
'"', "")) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df["date"] = data_df["date"].str.split("T", expand=True).iloc[:, 0]
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how="left")
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover']
if adjust == "":
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "hfq-factor":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
return qfq_factor_df
if __name__ == "__main__":
stock_zh_a_daily_hfq_df = stock_zh_a_daily(symbol="sh600582", adjust="qfq-factor")
print(stock_zh_a_daily_hfq_df)
stock_zh_a_daily_df = stock_zh_a_daily(symbol="sz000613", adjust="qfq")
print(stock_zh_a_daily_df)
stock_zh_a_spot_df = stock_zh_a_spot()
print(stock_zh_a_spot_df)
|
def stock_zh_a_spot() -> pd.DataFrame:
"""
    Fetch real-time quotes for all A shares from Sina Finance; calling this function repeatedly will get the caller's IP temporarily banned by Sina
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: pandas.DataFrame
symbol code name trade pricechange changepercent buy \
0 sh600000 600000 浦发银行 12.920 -0.030 -0.232 12.920
1 sh600004 600004 白云机场 18.110 -0.370 -2.002 18.110
2 sh600006 600006 东风汽车 4.410 -0.030 -0.676 4.410
3 sh600007 600007 中国国贸 17.240 -0.360 -2.045 17.240
4 sh600008 600008 首创股份 3.320 -0.030 -0.896 3.310
... ... ... ... ... ... ...
3755 sh600096 600096 云天化 5.270 -0.220 -4.007 5.270
3756 sh600097 600097 开创国际 10.180 -0.120 -1.165 10.180
3757 sh600098 600098 广州发展 6.550 -0.040 -0.607 6.540
3758 sh600099 600099 林海股份 6.540 -0.150 -2.242 6.540
3759 sh600100 600100 同方股份 8.200 -0.100 -1.205 8.200
sell settlement open high low volume amount \
0 12.930 12.950 12.950 13.100 12.860 46023920 597016896
1 18.120 18.480 18.510 18.510 17.880 24175071 437419344
2 4.420 4.440 4.490 4.490 4.410 4304900 19130233
3 17.280 17.600 17.670 17.670 17.220 684801 11879731
4 3.320 3.350 3.360 3.360 3.300 8284294 27579688
... ... ... ... ... ... ...
3755 5.280 5.490 5.490 5.500 5.220 16964636 90595172
3756 10.190 10.300 10.220 10.340 10.090 1001676 10231669
3757 6.550 6.590 6.560 6.620 6.500 1996449 13098901
3758 6.580 6.690 6.650 6.680 6.530 1866180 12314997
3759 8.210 8.300 8.300 8.310 8.120 12087236 99281447
ticktime per pb mktcap nmc turnoverratio
0 15:00:00 6.984 0.790 3.792289e+07 3.631006e+07 0.16376
1 15:00:07 32.927 2.365 3.747539e+06 3.747539e+06 1.16826
2 15:00:02 15.926 1.207 8.820000e+05 8.820000e+05 0.21525
3 15:00:02 22.390 2.367 1.736555e+06 1.736555e+06 0.06798
4 15:00:07 22.912 1.730 1.887569e+06 1.600444e+06 0.17185
... ... ... ... ... ...
3755 15:00:00 56.728 1.566 7.523847e+05 6.963668e+05 1.28386
3756 15:00:00 17.552 1.434 2.452734e+05 2.303459e+05 0.44268
3757 15:00:00 25.476 1.059 1.785659e+06 1.785659e+06 0.07323
3758 15:00:00 540.496 3.023 1.433045e+05 1.433045e+05 0.85167
3759 15:00:07 -6.264 1.465 2.430397e+06 2.430397e+06 0.40782
"""
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(range(1, page_count+1), desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url,
params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
return big_df
| 40
| 94
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2019/10/30 11:28
Desc: Sina Finance - A shares - real-time and historical quotes (including the qfq and hfq adjustment factors)
"""
import re
import demjson
import execjs
import pandas as pd
import requests
from tqdm import tqdm
from akshare.stock.cons import (zh_sina_a_stock_payload,
zh_sina_a_stock_url,
zh_sina_a_stock_count_url,
zh_sina_a_stock_hist_url,
hk_js_decode,
zh_sina_a_stock_hfq_url,
zh_sina_a_stock_qfq_url,
zh_sina_a_stock_amount_url)
def _get_zh_a_page_count() -> int:
"""
    Total number of pages listing all A-share stocks
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
    :return: total number of pages to fetch
:rtype: int
"""
res = requests.get(zh_sina_a_stock_count_url)
    stock_count = int(re.findall(re.compile(r"\d+"), res.text)[0])
    # 80 stocks per page; int() / 80 always yields a float in Python 3, so the
    # old isinstance(page_count, int) branch could never fire -- round up instead
    if stock_count % 80 == 0:
        return stock_count // 80
    else:
        return stock_count // 80 + 1
def stock_zh_a_spot() -> pd.DataFrame:
"""
    Fetch real-time quotes for all A shares from Sina Finance; calling this function repeatedly will get the caller's IP temporarily banned by Sina
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: pandas.DataFrame
symbol code name trade pricechange changepercent buy \
0 sh600000 600000 浦发银行 12.920 -0.030 -0.232 12.920
1 sh600004 600004 白云机场 18.110 -0.370 -2.002 18.110
2 sh600006 600006 东风汽车 4.410 -0.030 -0.676 4.410
3 sh600007 600007 中国国贸 17.240 -0.360 -2.045 17.240
4 sh600008 600008 首创股份 3.320 -0.030 -0.896 3.310
... ... ... ... ... ... ...
3755 sh600096 600096 云天化 5.270 -0.220 -4.007 5.270
3756 sh600097 600097 开创国际 10.180 -0.120 -1.165 10.180
3757 sh600098 600098 广州发展 6.550 -0.040 -0.607 6.540
3758 sh600099 600099 林海股份 6.540 -0.150 -2.242 6.540
3759 sh600100 600100 同方股份 8.200 -0.100 -1.205 8.200
sell settlement open high low volume amount \
0 12.930 12.950 12.950 13.100 12.860 46023920 597016896
1 18.120 18.480 18.510 18.510 17.880 24175071 437419344
2 4.420 4.440 4.490 4.490 4.410 4304900 19130233
3 17.280 17.600 17.670 17.670 17.220 684801 11879731
4 3.320 3.350 3.360 3.360 3.300 8284294 27579688
... ... ... ... ... ... ...
3755 5.280 5.490 5.490 5.500 5.220 16964636 90595172
3756 10.190 10.300 10.220 10.340 10.090 1001676 10231669
3757 6.550 6.590 6.560 6.620 6.500 1996449 13098901
3758 6.580 6.690 6.650 6.680 6.530 1866180 12314997
3759 8.210 8.300 8.300 8.310 8.120 12087236 99281447
ticktime per pb mktcap nmc turnoverratio
0 15:00:00 6.984 0.790 3.792289e+07 3.631006e+07 0.16376
1 15:00:07 32.927 2.365 3.747539e+06 3.747539e+06 1.16826
2 15:00:02 15.926 1.207 8.820000e+05 8.820000e+05 0.21525
3 15:00:02 22.390 2.367 1.736555e+06 1.736555e+06 0.06798
4 15:00:07 22.912 1.730 1.887569e+06 1.600444e+06 0.17185
... ... ... ... ... ...
3755 15:00:00 56.728 1.566 7.523847e+05 6.963668e+05 1.28386
3756 15:00:00 17.552 1.434 2.452734e+05 2.303459e+05 0.44268
3757 15:00:00 25.476 1.059 1.785659e+06 1.785659e+06 0.07323
3758 15:00:00 540.496 3.023 1.433045e+05 1.433045e+05 0.85167
3759 15:00:07 -6.264 1.465 2.430397e+06 2.430397e+06 0.40782
"""
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(range(1, page_count+1), desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url,
params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
return big_df
def stock_zh_a_daily(symbol: str = "sz000613", adjust: str = "qfq") -> pd.DataFrame:
"""
    Sina Finance - A shares - historical quotes for an individual stock; heavy scraping easily gets the caller's IP banned
:param symbol: sh600000
:type symbol: str
    :param adjust: empty string: unadjusted data; qfq: forward-adjusted data; hfq: backward-adjusted data; hfq-factor: backward-adjustment factors; qfq-factor: forward-adjustment factors
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = execjs.compile(hk_js_decode)
dict_list = js_code.call(
'd', res.text.split("=")[1].split(";")[0].replace(
'"', "")) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df["date"] = data_df["date"].str.split("T", expand=True).iloc[:, 0]
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how="left")
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover']
if adjust == "":
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "hfq-factor":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
return qfq_factor_df
if __name__ == "__main__":
stock_zh_a_daily_hfq_df = stock_zh_a_daily(symbol="sh600582", adjust="qfq-factor")
print(stock_zh_a_daily_hfq_df)
stock_zh_a_daily_df = stock_zh_a_daily(symbol="sz000613", adjust="qfq")
print(stock_zh_a_daily_df)
stock_zh_a_spot_df = stock_zh_a_spot()
print(stock_zh_a_spot_df)
|
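A hedged sketch of what the adjustment factors are for: re-deriving backward-adjusted (hfq) closes from the raw series, which should match adjust="hfq" up to the merge and forward-fill details; the symbol is reused from the __main__ block above:
# Hypothetical consistency check, not part of the akshare API.
raw_df = stock_zh_a_daily(symbol="sz000613", adjust="")
factor_df = stock_zh_a_daily(symbol="sz000613", adjust="hfq-factor")
merged = raw_df.join(factor_df, how="left").fillna(method="ffill")
merged["close_hfq"] = merged["close"] * merged["hfq_factor"]
print(merged[["close", "hfq_factor", "close_hfq"]].tail())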
gae_returns
|
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: trace-decay factor (the lambda in [1]), trading off bias and variance of the estimate
:return: estimated advantage
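For orientation, the estimator described above forms the temporal-difference residuals delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) and sums them discounted by gamma * lamb; a generic sketch of that recursion follows (this is not the masked pyrado implementation):
# Generic GAE sketch; assumes a single uninterrupted episode and that
# `values` carries one bootstrap entry more than `rewards`.
import numpy as np

def gae_advantages(rewards, values, gamma: float = 0.99, lamb: float = 0.95):
    deltas = rewards + gamma * values[1:] - values[:-1]  # TD residuals
    advantages = np.zeros_like(deltas)
    running = 0.0
    for t in reversed(range(len(deltas))):
        running = deltas[t] + gamma * lamb * running
        advantages[t] = running
    return advantages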
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
            # Use type(value) to support named tuples (the key is still the index though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
    to access the value for the next step, e.g. `next_observations` yields the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
    Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
    be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
    corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracing is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = {}
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
            done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
            done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate dict entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
                value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps from the rollout together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if (batch_size is None and num_batches is None) or (batch_size is not None and num_batches is not None):
raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified, i.e. not be None!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time the accumulated step count exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if it is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.filtfilt()`
:param fcn_arg_name: string naming the remaining input of `fcn()`, e.g. `x` for `scipy.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include everything.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
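# Quick sanity check (illustrative sketch with hypothetical values): the lfilter call
# above is equivalent to the naive backward recursion c[i] = data[i] + gamma * c[i + 1].
def _check_discounted_reverse_cumsum():
    data = np.array([1.0, 0.0, 2.0, 3.0])
    gamma = 0.9
    naive = np.zeros_like(data)
    acc = 0.0
    for i in reversed(range(len(data))):
        acc = data[i] + gamma * acc
        naive[i] = acc
    assert np.allclose(discounted_reverse_cumsum(data, gamma), naive)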
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
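# Illustration (hypothetical rollout, sketch only): the per-step discounted values of
# rewards [1, 1, 1] with gamma = 0.5 are [1.75, 1.5, 1.0].
def _check_discounted_value():
    ro = StepSequence(rewards=[1.0, 1.0, 1.0], observations=np.zeros(4), actions=np.zeros(3))
    assert np.allclose(discounted_value(ro, 0.5), [1.75, 1.5, 1.0])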
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the returned data, 'torch' or 'numpy'
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
# MASKED: gae_returns function (lines 895-917)
|
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: averaging factor of the generalized advantage estimator (lambda), trading off bias and variance
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
| 895
| 917
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
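# Small illustration (hypothetical values): _index_to_int behaves like regular Python
# sequence indexing, normalizing negative indices and range-checking the result.
def _check_index_to_int():
    assert _index_to_int(2, 5) == 2
    assert _index_to_int(-1, 5) == 4  # negative indices count from the end
    try:
        _index_to_int(5, 5)
        raise AssertionError("expected IndexError")
    except IndexError:
        pass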
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples (the key is still the index though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
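# Usage sketch (hypothetical data): DictIndexProxy exposes index i of every entry in a
# dict of arrays/lists as attributes, including the pluralized-key fallback.
def _demo_dict_index_proxy():
    data = {"observations": np.arange(6).reshape(3, 2), "infos": ["a", "b", "c"]}
    step0 = DictIndexProxy(data, 0)
    assert (step0.observation == np.array([0, 1])).all()  # resolved via "observations"
    assert step0.info == "a"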
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracking is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
instead of using the constructor arguments reduces duplicate code and allows overriding it during unit tests.
required_fields = {}
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since they are probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate dict entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps from the rollout together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if (batch_size is None and num_batches is None) or (batch_size is not None and num_batches is not None):
raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified, i.e. not be None!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time the accumulated step count exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if it is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.filtfilt()`
:param fcn_arg_name: string naming the remaining input of `fcn()`, e.g. `x` for `scipy.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include everything.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
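# End-to-end sketch (hypothetical shapes, illustrative only): construct a short rollout,
# use the Step proxies (including the `next_` prefix), concatenate, and process a field.
def _demo_step_sequence():
    ro = StepSequence(
        rewards=[0.0, 1.0],
        observations=np.arange(6).reshape(3, 2),  # length + 1 observations
        actions=np.zeros((2, 1)),
    )
    assert len(ro) == 2
    assert (ro[0].next_observation == ro.observations[1]).all()
    assert ro[1].done  # last step of a complete, continuous rollout is done
    cat = StepSequence.concat([ro, ro])  # truncates the extra observation of each part
    assert cat.length == 4 and cat.rollout_count == 2
    doubled = StepSequence.process_data(ro, lambda x: 2 * x, fcn_arg_name="x")
    assert (doubled.rewards == 2 * ro.rewards).all()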
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the returned data, 'torch' or 'numpy'
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: averaging factor of the generalized advantage estimator (lambda), trading off bias and variance
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
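# Numeric sketch (hypothetical rollout values): for lamb = 1 the generalized advantage
# reduces to the discounted return minus the value baseline, since the discounted sum
# of the deltas telescopes.
def _check_gae_identity():
    rewards = np.array([1.0, 1.0, 1.0])
    values = np.array([0.5, 0.4, 0.3])
    gamma = 0.9
    next_values = np.append(values[1:], 0.0)  # bootstrap 0 after the terminal step
    deltas = rewards + gamma * next_values - values
    adv = discounted_reverse_cumsum(deltas, gamma * 1.0)
    returns = discounted_reverse_cumsum(rewards, gamma)
    assert np.allclose(adv, returns - values)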
|
__init__
|
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples (the key is still the index though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracking is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
instead of using the constructor arguments reduces duplicate code and allows overriding it during unit tests.
required_fields = {}
# MASKED: __init__ function (lines 247-340)
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate dict entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps from the rollout together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 1), batch_size)  # - 1 so every sampled step has a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if batch_size is None and num_batches is None or batch_size is not None and num_batches is not None:
raise pyrado.ValueErr(msg="Either batch_size or num_batches must not be None, but not both or none!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> Union[to.Tensor, np.ndarray]:
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
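# Hedged illustration: for rewards [1, 1, 1] and gamma = 0.5, discounted_return
# yields 1 + 0.5 * 1 + 0.25 * 1 = 1.75 (as a tensor or ndarray, depending on data_format).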
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
:param fcn_arg_name: string naming the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include everything.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
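# Doctest-style sketch (assuming a 1-dim numpy input with samples along axis 0):
# >>> discounted_reverse_cumsum(np.array([1.0, 1.0, 1.0]), gamma=0.5)
# array([1.75, 1.5 , 1.  ])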
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the returned values
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: bias-variance trade-off factor of the generalized advantage estimation (lambda in [1])
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
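# Hedged usage sketch: gae_returns assumes that every step carries `value` and
# `next_value` entries (e.g. critic predictions), which are not required fields.
# >>> adv = gae_returns(ro, gamma=0.99, lamb=0.95)  # `ro` is a hypothetical rollout
# >>> len(adv) == ro.length
# True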
|
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
| 247
| 340
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
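# For illustration: _index_to_int resolves indices like the built-in sequences do,
# e.g. _index_to_int(-1, 5) == 4, while _index_to_int(5, 5) raises an IndexError.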
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples (the key is still the index, though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required data fields are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracking is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = set()
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
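# Hedged example: with _rollout_bounds == np.array([0, 3, 7]), rollout_count is 2
# and rollout_lengths is array([3, 4]).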
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate tuple entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
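# Hedged usage note: ro.torch() / ro.numpy() convert all stored data fields in place;
# e.g. ro.torch(data_type=to.float32) would additionally cast the dtypes.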
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps from the rollout together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 1), batch_size)  # - 1 so every sampled step has a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
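# Hedged sketch: steps, next_steps = ro.sample_w_next(32) yields two aligned
# StepSequence batches where next_steps[i] is the successor of steps[i] in `ro`
# (with `ro` being a hypothetical rollout of sufficient length).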
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if batch_size is None and num_batches is None or batch_size is not None and num_batches is not None:
raise pyrado.ValueErr(msg="Either batch_size or num_batches must not be None, but not both or none!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> Union[to.Tensor, np.ndarray]:
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
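# Hedged sketch: concatenating two complete rollouts of lengths 3 and 4 (with the
# same data fields and truncate_last=True) yields one continuous StepSequence with
# length 7 and rollout_bounds [0, 3, 7].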
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
:param fcn_arg_name: string naming the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include everything.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the returned values
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: bias-variance trade-off factor of the generalized advantage estimation (lambda in [1])
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
|
add_data
|
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples (the key is still the index, though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
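# Hedged illustration: step.next_observations resolves to rollout.observations[index + 1],
# which is why the `next_` prefix is rejected for non-continuous rollouts.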
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required data fields are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracking is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = set()
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
            done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
            done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
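    # E.g. (sketch, assuming a rollout `ro`): ro[2:5] yields a new StepSequence holding
    # three steps, whereas ro[2] yields a Step proxy into the original data.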
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
            # Validate tuple entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
# MASKED: add_data function (lines 484-521)
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
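    # E.g. (sketch, assuming a concatenated rollout `ro`): ro.get_rollout(-1) returns the
    # last sub-rollout, and ro.get_rollout(slice(0, 2)) the first two as one sequence.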
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
        Sample a random batch of steps together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
        shuffled_idcs = random.sample(range(self.length - 1), batch_size)  # - 1 so that every sampled step has a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
        if (batch_size is None) == (num_batches is None):
            raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
            # Yield all at once if there are fewer steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
        method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
            # Yield all at once if there are fewer steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
            # Yield the final partial batch, if any
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
    def discounted_return(self, gamma: float) -> Union[to.Tensor, np.ndarray]:
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
        Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
        :param rollout: `StepSequence` holding the data
        :param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
        :param fcn_arg_name: string name of the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
        :param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
        :param include_fields: list of field names to include for processing, pass `None` to include all fields.
                               If specified, only fields from this selection will be considered
        :param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
        :param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
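# Usage sketch (illustrative; `ro` is an assumed, already constructed rollout): median-filter
# every data field except the actions. `scipy.signal.medfilt` takes its input array via the
# parameter `volume`, hence fcn_arg_name="volume":
#     filtered = StepSequence.process_data(
#         ro, signal.medfilt, fcn_arg_name="volume", exclude_fields=["actions"], kernel_size=3
#     )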
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
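# Worked example (illustration, not part of the original sources): with gamma = 0.5
# and data = [1., 1., 1.], the filter computes, from the last step backwards,
# [1 + 0.5 * (1 + 0.5 * 1), 1 + 0.5 * 1, 1] = [1.75, 1.5, 1.0], so
# discounted_reverse_cumsum(np.array([1.0, 1.0, 1.0]), 0.5) -> array([1.75, 1.5, 1.0])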
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
    :param data_format: data format of the given rollouts
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
        return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
        [1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
    :param lamb: trace decay factor (lambda) of the generalized advantage estimation
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
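# Usage sketch (illustrative values; note that `add_data` is elided in this masked
# listing, so the snippet only runs against the complete module):
#     ro = StepSequence(
#         rewards=np.array([1.0, 0.5, 2.0]),
#         observations=np.zeros((4, 2)),  # one more entry than there are steps
#         actions=np.zeros((3, 1)),
#     )
#     len(ro)                   # -> 3
#     ro.undiscounted_return()  # -> 3.5
#     ro[0].next_observation    # observation at the start of step 1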
|
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
            value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
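    # Usage sketch (hypothetical field names, for illustration only): further per-step
    # fields can be attached to an existing rollout `ro` after construction, e.g.
    #     ro.add_data("values", np.zeros(len(ro)))  # one entry per step
    #     ro.add_data("hidden", item_shape=(16,))   # zero-filled placeholder field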
| 484
| 521
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
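# For example: _index_to_int(-1, 5) resolves to 4, _index_to_int(2, 5) stays 2,
# and _index_to_int(5, 5) raises an IndexError.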
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
            # Use type(value) to support named tuples (the key is still the index, though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
    This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
    to access the value for the next step, e.g. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
    Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
    be seen as a sequence of steps. Each Step object is a proxy whose attributes refer to the respective slice of the
    corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracing is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
    # instead of using the constructor arguments reduces duplicate code and allows overriding it during unit tests.
    required_fields = set()
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
            done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
            done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
            # Validate tuple entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
            value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
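    # Usage sketch (assuming an already constructed rollout `ro`): the conversion
    # happens in place, e.g.
    #     ro.torch(to.float32)  # all data fields become torch tensors of dtype float32
    #     ro.numpy()            # back to ndarrays; passing no dtype leaves the dtypes unchanged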
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
        Sample a random batch of steps together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
        shuffled_idcs = random.sample(range(self.length - 1), batch_size)  # - 1 so that every sampled step has a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
        if (batch_size is None) == (num_batches is None):
            raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
            # Yield all at once if there are fewer steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
        method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
            # Yield all at once if there are fewer steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
            # Yield the final partial batch, if any
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
    def discounted_return(self, gamma: float) -> Union[to.Tensor, np.ndarray]:
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
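    # Worked example (illustration): for rewards [1, 1, 1] and gamma = 0.9 this is the
    # dot product of the rewards with the powers of gamma, i.e. 1 + 0.9 + 0.81 = 2.71.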
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
        Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
        :param rollout: `StepSequence` holding the data
        :param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
        :param fcn_arg_name: string name of the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
        :param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
        :param include_fields: list of field names to include for processing, pass `None` to include all fields.
                               If specified, only fields from this selection will be considered
        :param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
        :param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
    :param data_format: data format of the given rollouts
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
        return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
        [1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
    :param lamb: trace decay factor (lambda) of the generalized advantage estimation
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
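# Self-contained sketch of concatenation and sub-rollout access (shapes and values
# are illustrative assumptions, not part of the original module):
if __name__ == "__main__":

    def _make_rollout(num_steps: int) -> StepSequence:
        return StepSequence(
            rewards=np.ones(num_steps),
            observations=np.zeros((num_steps + 1, 2)),
            actions=np.zeros((num_steps, 1)),
        )

    cat = StepSequence.concat([_make_rollout(2), _make_rollout(3)])
    print(cat.rollout_count)  # 2 sub-rollouts
    print(list(cat.rollout_lengths))  # [2, 3]
    print(len(cat.get_rollout(1)))  # 3 steps in the second sub-rollout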
|
convert
|
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
            # Use type(value) to support named tuples (the key is still the index, though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` for the observations made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracing is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = set()
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate tuple entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
# MASKED: convert function (lines 555-569)
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
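# A minimal sketch of sub-rollout indexing; assume hypothetical bounds [0, 2, 5]:
#   ro.get_rollout(0)             # steps 0..1, i.e. ro[0:2]
#   ro.get_rollout(-1)            # steps 2..4, negative indices are allowed
#   ro.get_rollout(slice(0, 2))   # steps 0..4 as one continuous StepSequence
#   ro.get_rollout([1, 0])        # iterable: sub-rollouts are sliced and concatenated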
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if (batch_size is None and num_batches is None) or (batch_size is not None and num_batches is not None):
raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified, not both or neither!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
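# A minimal sketch; assume a hypothetical rollout of length 10:
#   list(ro.split_ordered_batches(batch_size=4))   # ordered batches of 4, 4, and 2 steps
#   list(ro.split_ordered_batches(num_batches=2))  # 2 batches of ceil(10 / 2) = 5 steps each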
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time the accumulated step count exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
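# A worked example; assume hypothetical rewards [1, 1, 1] and gamma = 0.5:
#   discounted_return(0.5) = 1 * 0.5**0 + 1 * 0.5**1 + 1 * 0.5**2 = 1.75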
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
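# A minimal sketch of how the bounds are merged; the parts are hypothetical:
#   part A with bounds [0, 3] and part B with bounds [0, 2, 4] concatenate to
#   bounds [0, 3, 5, 7], since B's bounds are shifted by A's accumulated length 3.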
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
:param fcn_arg_name: string name of the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include all fields.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
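# A worked example; take hypothetical data [1, 1, 1] and gamma = 0.5:
#   the filter computes out[t] = data[t] + gamma * out[t + 1] backwards in time,
#   yielding [1 + 0.5 * 1.5, 1 + 0.5 * 1, 1] = [1.75, 1.5, 1.0]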
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the given rollouts
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: discount factor for the generalized advantage estimation, trading off bias against variance
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
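# A sketch of the recursion implemented above, with symbols as in [1]:
#   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
#   A_t = sum_l (gamma * lamb)**l * delta_{t+l}
# which is exactly what discounted_reverse_cumsum(deltas, gamma * lamb) evaluates.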
|
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
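# A minimal usage sketch; `ro` is a hypothetical StepSequence in numpy format:
#   ro.convert("torch")              # all data fields become to.Tensor
#   ro.convert("torch")              # no-op, the format already matches
#   ro.numpy(data_type=np.float32)   # back to ndarrays with an explicit dtype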
| 555
| 569
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples (the key is still the index, though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` for the observations made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracing is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = set()
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate tuple entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if (batch_size is None and num_batches is None) or (batch_size is not None and num_batches is not None):
raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified, not both or neither!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time the accumulated step count exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
:param fcn_arg_name: string name of the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include all fields.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the given rollouts
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: discount factor for the generalized advantage estimation, trading off bias against variance
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
|
split_ordered_batches
|
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples. (the key is still the index though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps, to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracing is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = {}
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
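# Illustrative example (not part of the original file):
#   done = [False, False, True, False, False]  ->  rollout_bounds = [0, 3, 5]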
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate tuple entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps from the rollout, together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
# MASKED: split_ordered_batches function (lines 636-664)
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognized where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
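# Computes R = sum_{t=0}^{T-1} gamma^t * r_t as a dot product of the rewards with the powers of gamma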
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
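# Illustrative example (not part of the original file): parts with bounds [0, 3] and
# [0, 2, 4] are merged into [0, 3, 5, 7]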
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
:param fcn_arg_name: string naming the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include all fields.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the returned data, 'torch' or 'numpy'
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: trade-off factor of the generalized advantage estimation (lambda), balancing bias and variance
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
|
def split_ordered_batches(self, batch_size: Optional[int] = None, num_batches: Optional[int] = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if (batch_size is None and num_batches is None) or (batch_size is not None and num_batches is not None):
raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified, i.e. not None!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
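# e.g. self.length == 10 and num_batches == 3 gives batch_size == ceil(10 / 3) == 4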
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
| 636
| 664
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
# Use type(value) to support named tuples. (the key is still the index though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
This object is a proxy, referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
to access the value for the next step, e.g. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
Some values, like the observations, can have one more element than there are steps, to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
Storing PyTorch tensors with gradient tracing is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = {}
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
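# Illustrative example (not part of the original file):
#   done = [False, False, True, False, False]  ->  rollout_bounds = [0, 3, 5]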
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate tuple entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements,"
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements," f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps from the rollout, together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: Optional[int] = None, num_batches: Optional[int] = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if (batch_size is None and num_batches is None) or (batch_size is not None and num_batches is not None):
raise pyrado.ValueErr(msg="Exactly one of batch_size and num_batches must be specified, i.e. not None!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
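# e.g. self.length == 10 and num_batches == 3 gives batch_size == ceil(10 / 3) == 4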
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognized where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are less steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield eventual final one
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
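# Computes R = sum_{t=0}^{T-1} gamma^t * r_t as a dot product of the rewards with the powers of gamma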
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
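# Illustrative example (not part of the original file): parts with bounds [0, 3] and
# [0, 2, 4] are merged into [0, 3, 5, 7]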
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
:param fcn_arg_name: string naming the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include all fields.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursivelyy for nested data structures. """
# Add to actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
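# Minimal usage sketch for `StepSequence.process_data` (illustrative only; `ro` is a
# hypothetical rollout and "actions" a hypothetical field name). It low-pass filters
# every data field except the actions, reusing the module's `scipy.signal` import and
# forwarding the filter coefficients as keyword arguments:
#
#     b, a = signal.butter(N=3, Wn=0.1)
#     ro_filt = StepSequence.process_data(
#         ro, signal.filtfilt, fcn_arg_name="x", exclude_fields=["actions"], b=b, a=a, axis=0
#     )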
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
    :param data_format: data format of the returned values, 'torch' to use Tensors or 'numpy' to use ndarrays
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
        return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
        [1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
    :param lamb: bias-variance trade-off factor of the generalized advantage estimator (GAE lambda)
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
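if __name__ == "__main__":
    # Self-check sketch (added for illustration, not part of the original module):
    # verify the lfilter-based reverse discounted cumulative sum against a naive loop.
    _gamma = 0.9
    _data = np.arange(5, dtype=np.float64)
    _naive = np.array(
        [sum(_gamma ** k * _data[t + k] for k in range(len(_data) - t)) for t in range(len(_data))]
    )
    assert np.allclose(discounted_reverse_cumsum(_data, _gamma), _naive)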
|
vmin
|
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
|
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
"""
A representation of an object in Z^n.
The elements of a Vector can be integers or generic SymPy expressions.
Notes
-----
1) Vector-scalar comparison
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to all vector entries.
For example: ::
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparing Vector entries when these are SymPy expressions
When we compare two symbolic (SymPy expressions) entries, it might not be
possible to determine the truth value of the relation. For example, the
truth value of `3*i < 4*j` cannot be determined (unless some information
about `i` and `j` is available). In some cases, however, the comparison is
feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
for two Vectors to be comparable is that their pair-wise indices are affine
    functions of the same variables, with identical coefficients.
If the Vector is instantiated passing the keyword argument ``smart = True``,
some manipulation will be attempted to infer the truth value of a non-trivial
symbolic relation. This increases the cost of the comparison, while potentially
being ineffective, so use it judiciously. By default, ``smart = False``.
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
"""
def __new__(cls, *items, smart=False):
if not all(is_integer(i) or isinstance(i, Basic) for i in items):
raise TypeError("Illegal Vector element type")
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
# Not iterable
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
@_asvector()
def __lt__(self, other):
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
@_asvector()
def __gt__(self, other):
return other.__lt__(self)
@_asvector()
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
        # though nothing can be assumed about `v1 < v0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
# See analogous considerations in __lt__
return False
raise TypeError("Non-comparable index functions")
# Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
# with `v0` and `v1` as above, we would get here
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
@property
def rank(self):
return len(self)
@property
def sum(self):
return sum(self)
@property
def is_constant(self):
return all(is_integer(i) for i in self)
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
        integer points that exist between self_i and other_i.
        Examples
        --------
                 | 3 |          | 1 |               |  2 |
        source = | 2 | , sink = | 4 | , distance => | -2 |
                 | 1 |          | 5 |               | -4 |
        There are 2, 2, and 4 points between [3-1], [2-4], and [1-5], respectively.
"""
return self - other
class LabeledVector(Vector):
"""
A Vector that associates a Dimension to each element.
"""
def __new__(cls, items=None):
try:
labels, values = zip(*items)
except (ValueError, TypeError):
labels, values = (), ()
if not all(isinstance(i, Dimension) for i in labels):
raise ValueError("All labels must be of type Dimension, got [%s]"
% ','.join(i.__class__.__name__ for i in labels))
obj = super(LabeledVector, cls).__new__(cls, *values)
obj.labels = labels
return obj
@classmethod
def transpose(cls, *vectors):
"""
Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
"""
if len(vectors) == 0:
return LabeledVector()
if not all(isinstance(v, LabeledVector) for v in vectors):
raise ValueError("All items must be of type LabeledVector, got [%s]"
% ','.join(i.__class__.__name__ for i in vectors))
T = OrderedDict()
for v in vectors:
for l, i in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple((l, Vector(*i)) for l, i in T.items())
def __repr__(self):
return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))
def __hash__(self):
return hash((tuple(self), self.labels))
def __eq__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__lt__(other)
def __gt__(self, other):
return other.__lt__(self)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __getitem__(self, index):
if isinstance(index, (slice, int)):
return super(LabeledVector, self).__getitem__(index)
elif isinstance(index, Dimension):
for d in index._defines:
if d in self.labels:
i = self.labels.index(d)
return super(LabeledVector, self).__getitem__(i)
return None
else:
raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
% type(index))
def fromlabel(self, label, v=None):
return self[label] if label in self.labels else v
def items(self):
return zip(self.labels, self)
@memoized_meth
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed.
"""
if not isinstance(other, LabeledVector):
raise TypeError("Cannot compute distance from obj of type %s", type(other))
if self.labels != other.labels:
raise TypeError("Cannot compute distance due to mismatching `labels`")
return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
# MASKED: vmin function (lines 317-336)
def vmax(*vectors):
"""
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i > ret or i >= ret:
ret = i
return ret
|
def vmin(*vectors):
"""
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i < ret or i <= ret:
ret = i
return ret
| 317
| 336
|
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
"""
A representation of an object in Z^n.
The elements of a Vector can be integers or generic SymPy expressions.
Notes
-----
1) Vector-scalar comparison
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to all vector entries.
For example: ::
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparing Vector entries when these are SymPy expressions
When we compare two symbolic (SymPy expressions) entries, it might not be
possible to determine the truth value of the relation. For example, the
truth value of `3*i < 4*j` cannot be determined (unless some information
about `i` and `j` is available). In some cases, however, the comparison is
feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
for two Vectors to be comparable is that their pair-wise indices are affine
    functions of the same variables, with identical coefficients.
If the Vector is instantiated passing the keyword argument ``smart = True``,
some manipulation will be attempted to infer the truth value of a non-trivial
symbolic relation. This increases the cost of the comparison, while potentially
being ineffective, so use it judiciously. By default, ``smart = False``.
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
"""
def __new__(cls, *items, smart=False):
if not all(is_integer(i) or isinstance(i, Basic) for i in items):
raise TypeError("Illegal Vector element type")
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
# Not iterable
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
@_asvector()
def __lt__(self, other):
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
@_asvector()
def __gt__(self, other):
return other.__lt__(self)
@_asvector()
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
        # though nothing can be assumed about `v1 < v0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
# See analogous considerations in __lt__
return False
raise TypeError("Non-comparable index functions")
# Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
# with `v0` and `v1` as above, we would get here
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
@property
def rank(self):
return len(self)
@property
def sum(self):
return sum(self)
@property
def is_constant(self):
return all(is_integer(i) for i in self)
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
        integer points that exist between self_i and other_i.
        Examples
        --------
                 | 3 |          | 1 |               |  2 |
        source = | 2 | , sink = | 4 | , distance => | -2 |
                 | 1 |          | 5 |               | -4 |
        There are 2, 2, and 4 points between [3-1], [2-4], and [1-5], respectively.
"""
return self - other
class LabeledVector(Vector):
"""
A Vector that associates a Dimension to each element.
"""
def __new__(cls, items=None):
try:
labels, values = zip(*items)
except (ValueError, TypeError):
labels, values = (), ()
if not all(isinstance(i, Dimension) for i in labels):
raise ValueError("All labels must be of type Dimension, got [%s]"
% ','.join(i.__class__.__name__ for i in labels))
obj = super(LabeledVector, cls).__new__(cls, *values)
obj.labels = labels
return obj
@classmethod
def transpose(cls, *vectors):
"""
Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
"""
if len(vectors) == 0:
return LabeledVector()
if not all(isinstance(v, LabeledVector) for v in vectors):
raise ValueError("All items must be of type LabeledVector, got [%s]"
% ','.join(i.__class__.__name__ for i in vectors))
T = OrderedDict()
for v in vectors:
for l, i in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple((l, Vector(*i)) for l, i in T.items())
def __repr__(self):
return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))
def __hash__(self):
return hash((tuple(self), self.labels))
def __eq__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__lt__(other)
def __gt__(self, other):
return other.__lt__(self)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __getitem__(self, index):
if isinstance(index, (slice, int)):
return super(LabeledVector, self).__getitem__(index)
elif isinstance(index, Dimension):
for d in index._defines:
if d in self.labels:
i = self.labels.index(d)
return super(LabeledVector, self).__getitem__(i)
return None
else:
raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
% type(index))
def fromlabel(self, label, v=None):
return self[label] if label in self.labels else v
def items(self):
return zip(self.labels, self)
@memoized_meth
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed.
"""
if not isinstance(other, LabeledVector):
raise TypeError("Cannot compute distance from obj of type %s", type(other))
if self.labels != other.labels:
raise TypeError("Cannot compute distance due to mismatching `labels`")
return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
def vmin(*vectors):
"""
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i < ret or i <= ret:
ret = i
return ret
def vmax(*vectors):
"""
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i > ret or i >= ret:
ret = i
return ret
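if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): vmin/vmax scan the
    # input using the Vectors' lexicographic comparison operators defined above.
    v0, v1, v2 = Vector(3, 2, 1), Vector(1, 4, 5), Vector(1, 4, 4)
    assert vmin(v0, v1, v2) == Vector(1, 4, 4)
    assert vmax(v0, v1, v2) == Vector(3, 2, 1)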
|
vmax
|
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
|
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
"""
A representation of an object in Z^n.
The elements of a Vector can be integers or generic SymPy expressions.
Notes
-----
1) Vector-scalar comparison
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to all vector entries.
For example: ::
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparing Vector entries when these are SymPy expressions
When we compare two symbolic (SymPy expressions) entries, it might not be
possible to determine the truth value of the relation. For example, the
truth value of `3*i < 4*j` cannot be determined (unless some information
about `i` and `j` is available). In some cases, however, the comparison is
feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
for two Vectors to be comparable is that their pair-wise indices are affine
    functions of the same variables, with identical coefficients.
If the Vector is instantiated passing the keyword argument ``smart = True``,
some manipulation will be attempted to infer the truth value of a non-trivial
symbolic relation. This increases the cost of the comparison, while potentially
being ineffective, so use it judiciously. By default, ``smart = False``.
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
"""
def __new__(cls, *items, smart=False):
if not all(is_integer(i) or isinstance(i, Basic) for i in items):
raise TypeError("Illegal Vector element type")
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
# Not iterable
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
@_asvector()
def __lt__(self, other):
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
@_asvector()
def __gt__(self, other):
return other.__lt__(self)
@_asvector()
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
        # though nothing can be assumed about `v1 < v0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
# See analogous considerations in __lt__
return False
raise TypeError("Non-comparable index functions")
# Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
# with `v0` and `v1` as above, we would get here
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
@property
def rank(self):
return len(self)
@property
def sum(self):
return sum(self)
@property
def is_constant(self):
return all(is_integer(i) for i in self)
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
        integer points that exist between self_i and other_i.
        Examples
        --------
                 | 3 |          | 1 |               |  2 |
        source = | 2 | , sink = | 4 | , distance => | -2 |
                 | 1 |          | 5 |               | -4 |
        There are 2, 2, and 4 points between [3-1], [2-4], and [1-5], respectively.
"""
return self - other
class LabeledVector(Vector):
"""
A Vector that associates a Dimension to each element.
"""
def __new__(cls, items=None):
try:
labels, values = zip(*items)
except (ValueError, TypeError):
labels, values = (), ()
if not all(isinstance(i, Dimension) for i in labels):
raise ValueError("All labels must be of type Dimension, got [%s]"
% ','.join(i.__class__.__name__ for i in labels))
obj = super(LabeledVector, cls).__new__(cls, *values)
obj.labels = labels
return obj
@classmethod
def transpose(cls, *vectors):
"""
Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
"""
if len(vectors) == 0:
return LabeledVector()
if not all(isinstance(v, LabeledVector) for v in vectors):
raise ValueError("All items must be of type LabeledVector, got [%s]"
% ','.join(i.__class__.__name__ for i in vectors))
T = OrderedDict()
for v in vectors:
for l, i in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple((l, Vector(*i)) for l, i in T.items())
def __repr__(self):
return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))
def __hash__(self):
return hash((tuple(self), self.labels))
def __eq__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__lt__(other)
def __gt__(self, other):
return other.__lt__(self)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __getitem__(self, index):
if isinstance(index, (slice, int)):
return super(LabeledVector, self).__getitem__(index)
elif isinstance(index, Dimension):
for d in index._defines:
if d in self.labels:
i = self.labels.index(d)
return super(LabeledVector, self).__getitem__(i)
return None
else:
raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
% type(index))
def fromlabel(self, label, v=None):
return self[label] if label in self.labels else v
def items(self):
return zip(self.labels, self)
@memoized_meth
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed.
"""
if not isinstance(other, LabeledVector):
raise TypeError("Cannot compute distance from obj of type %s", type(other))
if self.labels != other.labels:
raise TypeError("Cannot compute distance due to mismatching `labels`")
return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
def vmin(*vectors):
"""
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i < ret or i <= ret:
ret = i
return ret
# MASKED: vmax function (lines 339-358)
|
def vmax(*vectors):
"""
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i > ret or i >= ret:
ret = i
return ret
| 339
| 358
|
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
"""
A representation of an object in Z^n.
The elements of a Vector can be integers or generic SymPy expressions.
Notes
-----
1) Vector-scalar comparison
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to all vector entries.
For example: ::
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparing Vector entries when these are SymPy expressions
When we compare two symbolic (SymPy expressions) entries, it might not be
possible to determine the truth value of the relation. For example, the
truth value of `3*i < 4*j` cannot be determined (unless some information
about `i` and `j` is available). In some cases, however, the comparison is
feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
for two Vectors to be comparable is that their pair-wise indices are affine
    functions of the same variables, with identical coefficients.
If the Vector is instantiated passing the keyword argument ``smart = True``,
some manipulation will be attempted to infer the truth value of a non-trivial
symbolic relation. This increases the cost of the comparison, while potentially
being ineffective, so use it judiciously. By default, ``smart = False``.
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
"""
def __new__(cls, *items, smart=False):
if not all(is_integer(i) or isinstance(i, Basic) for i in items):
raise TypeError("Illegal Vector element type")
obj = super(Vector, cls).__new__(cls, items)
obj.smart = smart
return obj
def _asvector(relax=False):
def __asvector(func):
def wrapper(self, other):
if not isinstance(other, Vector):
try:
other = Vector(*other)
except TypeError:
# Not iterable
other = Vector(*(as_tuple(other)*len(self)))
if relax is False and len(self) != len(other):
raise TypeError("Cannot operate with Vectors of different rank")
return func(self, other)
return wrapper
return __asvector
def __hash__(self):
return super(Vector, self).__hash__()
@_asvector()
def __add__(self, other):
return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __radd__(self, other):
return self + other
@_asvector()
def __sub__(self, other):
return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)
@_asvector()
def __rsub__(self, other):
return self - other
@_asvector(relax=True)
def __eq__(self, other):
return super(Vector, self).__eq__(other)
@_asvector(relax=True)
def __ne__(self, other):
return super(Vector, self).__ne__(other)
@_asvector()
def __lt__(self, other):
# This might raise an exception if the distance between the i-th entry
# of `self` and `other` isn't integer, but rather a generic expression
# not comparable to 0. However, the implementation is "smart", in the
# sense that it will return as soon as the first two comparable entries
# (i.e., such that their distance is a non-zero integer) are found
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
# If `i` can assume the value 0 in at least one case, then
# definitely `i < 0` is generally False, so __lt__ must
# return False
return False
elif (i >= 0) == true:
return False
raise TypeError("Non-comparable index functions")
return False
@_asvector()
def __gt__(self, other):
return other.__lt__(self)
@_asvector()
def __le__(self, other):
if self.__eq__(other):
return True
# We cannot simply resort to `__lt__` as it might happen that:
# * v0 < v1 --> False
# * v0 == v1 --> False
# But
# * v0 <= v1 --> True
#
# For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
# the property that definitely `a >= 0`, then surely `v1 <= v0`, even
        # though nothing can be assumed about `v1 < v0` and `v1 == v0`
for i in self.distance(other):
try:
val = int(i)
if val < 0:
return True
elif val > 0:
return False
except TypeError:
if self.smart:
if (i < 0) == true:
return True
elif (i <= 0) == true:
continue
elif (i > 0) == true:
return False
elif (i >= 0) == true:
# See analogous considerations in __lt__
return False
raise TypeError("Non-comparable index functions")
# Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
# with `v0` and `v1` as above, we would get here
return True
@_asvector()
def __ge__(self, other):
return other.__le__(self)
def __getitem__(self, key):
ret = super(Vector, self).__getitem__(key)
return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret
def __repr__(self):
return "(%s)" % ','.join(str(i) for i in self)
@property
def rank(self):
return len(self)
@property
def sum(self):
return sum(self)
@property
def is_constant(self):
return all(is_integer(i) for i in self)
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
        integer points that exist between self_i and other_i.
        Examples
        --------
                 | 3 |          | 1 |               |  2 |
        source = | 2 | , sink = | 4 | , distance => | -2 |
                 | 1 |          | 5 |               | -4 |
        There are 2, 2, and 4 points between [3-1], [2-4], and [1-5], respectively.
"""
return self - other
class LabeledVector(Vector):
"""
A Vector that associates a Dimension to each element.
"""
def __new__(cls, items=None):
try:
labels, values = zip(*items)
except (ValueError, TypeError):
labels, values = (), ()
if not all(isinstance(i, Dimension) for i in labels):
raise ValueError("All labels must be of type Dimension, got [%s]"
% ','.join(i.__class__.__name__ for i in labels))
obj = super(LabeledVector, cls).__new__(cls, *values)
obj.labels = labels
return obj
@classmethod
def transpose(cls, *vectors):
"""
Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
"""
if len(vectors) == 0:
return LabeledVector()
if not all(isinstance(v, LabeledVector) for v in vectors):
raise ValueError("All items must be of type LabeledVector, got [%s]"
% ','.join(i.__class__.__name__ for i in vectors))
T = OrderedDict()
for v in vectors:
for l, i in zip(v.labels, v):
T.setdefault(l, []).append(i)
return tuple((l, Vector(*i)) for l, i in T.items())
def __repr__(self):
return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))
def __hash__(self):
return hash((tuple(self), self.labels))
def __eq__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, LabeledVector) and self.labels != other.labels:
raise TypeError("Cannot compare due to mismatching `labels`")
return super(LabeledVector, self).__lt__(other)
def __gt__(self, other):
return other.__lt__(self)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __getitem__(self, index):
if isinstance(index, (slice, int)):
return super(LabeledVector, self).__getitem__(index)
elif isinstance(index, Dimension):
for d in index._defines:
if d in self.labels:
i = self.labels.index(d)
return super(LabeledVector, self).__getitem__(i)
return None
else:
raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
% type(index))
def fromlabel(self, label, v=None):
return self[label] if label in self.labels else v
def items(self):
return zip(self.labels, self)
@memoized_meth
def distance(self, other):
"""
Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed.
"""
if not isinstance(other, LabeledVector):
raise TypeError("Cannot compute distance from obj of type %s", type(other))
if self.labels != other.labels:
raise TypeError("Cannot compute distance due to mismatching `labels`")
return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
def vmin(*vectors):
"""
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i < ret or i <= ret:
ret = i
return ret
def vmax(*vectors):
"""
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
"""
if not all(isinstance(i, Vector) for i in vectors):
raise TypeError("Expected an iterable of Vectors")
if len(vectors) == 0:
raise ValueError("min() arg is an empty sequence")
ret = vectors[0]
for i in vectors[1:]:
if i > ret or i >= ret:
ret = i
return ret
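if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): a LabeledVector pairs
    # each component with a Dimension; distance() and indexing respect the labels.
    x, y = Dimension(name='x'), Dimension(name='y')
    lv0 = LabeledVector([(x, 3), (y, 2)])
    lv1 = LabeledVector([(x, 1), (y, 4)])
    assert lv0.distance(lv1) == LabeledVector([(x, 2), (y, -2)])
    assert lv0[x] == 3 and lv0.fromlabel(y) == 2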
|
to_hdulist
|
Convert PSF table data to FITS HDU list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
# MASKED: to_hdulist function (lines 163-202)
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.
        The PSF parameters are interpolated at the given energy and theta.
Parameters
----------
        energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
        # Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment fraction as a function of energy.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
        energies : `~astropy.units.Quantity`
            Energies to compute containment radius for.
        thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
        exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
        table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
        rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
the array of position errors (rad) on which the PSF3D will be defined
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
            the PSF3D. It will be defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
|
def to_hdulist(self):
"""
        Convert PSF table data to FITS HDU list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
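# Minimal usage sketch (illustrative; "psf.fits" and "psf_copy.fits" are
# hypothetical file names): round-trip a PSF through the HDU list produced above.
#
#     psf = EnergyDependentMultiGaussPSF.read("psf.fits", hdu="PSF_2D_GAUSS")
#     hdulist = psf.to_hdulist()
#     hdulist.writeto("psf_copy.fits", overwrite=True)
#     psf_copy = EnergyDependentMultiGaussPSF.from_table_hdu(hdulist[1])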
| 163
| 202
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert PSF data to a FITS HDU list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated to the requested energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((thetas.size, energies.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment radius as a function of energy, for several containment fractions.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF to a table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The array of position errors (rad) on which the PSF3D will be defined.
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
The PSF3D, defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
|
psf_at_energy_and_theta
|
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated to the requested energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
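To make this contract concrete, a small evaluation sketch with hypothetical values and an assumed `psf` instance:
import astropy.units as u
from astropy.coordinates import Angle
gauss = psf.psf_at_energy_and_theta(energy=1 * u.TeV, theta=Angle(0.5, "deg"))
r68 = gauss.containment_radius(0.68)  # 68% containment radius of the returned MultiGauss2D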
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert PSF data to a FITS HDU list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
# MASKED: psf_at_energy_and_theta function (lines 211-250)
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((thetas.size, energies.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment radius as a function of energy, for several containment fractions.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF to a table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The array of position errors (rad) on which the PSF3D will be defined.
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
The PSF3D, defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
|
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated to the requested energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
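The only non-obvious step above is norm = pars["scale"] * 2 * a * sigma ** 2: integrating a 2D Gaussian with peak amplitude A and width sigma over the plane gives 2 * pi * A * sigma**2, so after m.normalize() the relative component weights depend only on A * sigma**2 and constant prefactors cancel. A self-contained numerical check of that integral (scipy assumed available):
import numpy as np
from scipy.integrate import quad
A, sigma = 0.3, 0.05  # hypothetical amplitude and width
# Plane integral of A * exp(-r^2 / (2 sigma^2)) in polar coordinates.
integral, _ = quad(lambda r: A * np.exp(-r ** 2 / (2 * sigma ** 2)) * 2 * np.pi * r, 0, np.inf)
assert np.isclose(integral, 2 * np.pi * A * sigma ** 2)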
| 211
| 250
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert PSF data to a FITS HDU list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated to the requested energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((thetas.size, energies.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment radius as a function of energy, for several containment fractions.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF to a table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The array of position errors (rad) on which the PSF3D will be defined.
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
The PSF3D, defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
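For completeness, a loading sketch mirroring the Examples section of the class docstring above (requires $GAMMAPY_DATA to be set):
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
psf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
print(psf.info())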
|
plot_containment
|
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
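A usage sketch for the documented parameters; the axis setup and colormap are illustrative choices and `psf` is assumed:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
psf.plot_containment(fraction=0.95, ax=ax, add_cbar=True, cmap="viridis")
plt.show()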
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert PSF data to a FITS HDU list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated to the requested energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((thetas.size, energies.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
# MASKED: plot_containment function (lines 276-323)
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment radius as a function of energy, for several containment fractions.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF to a table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The array of position errors (rad) on which the PSF3D will be defined.
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
The PSF3D, defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
|
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
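A note on the grid orientation used above: containment_radius returns an Angle of shape (n_offset, n_energy), which pcolormesh then draws with energy along x and offset along y. A quick shape check, assuming `psf`:
energy = psf.energy_axis_true.center
offset = psf.offset_axis.center
radius = psf.containment_radius(energy, offset, fraction=0.68)
assert radius.shape == (offset.size, energy.size)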
| 276
| 323
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert PSF data to a FITS HDU list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated to the requested energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((thetas.size, energies.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment fraction as a function of energy.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
Array of position errors (rad) on which the PSF3D will be defined.
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
The PSF3D. It will be defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
|
info
|
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated at the given energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment fraction as a function of energy.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
# MASKED: info function (lines 372-416)
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
Array of position errors (rad) on which the PSF3D will be defined.
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
The PSF3D. It will be defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
|
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
| 372
| 416
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
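# Replace zero sigma values with a dummy of 1 so that interpolation and
# PSF evaluation stay finite (a zero-width Gaussian component would
# otherwise divide by zero).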
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
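# Each parameter grid is written as a single table row holding a 2D cell,
# matching the GADF DL3 layout for multi-Gauss PSFs.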
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.utils.gauss.MultiGauss2D` model for given energy and theta.
The Gaussian parameters are interpolated at the given energy and theta.
Parameters
----------
energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
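# Convert the interpolated amplitudes (A_1 is fixed to 1 by convention)
# into the per-component norms expected by MultiGauss2D; normalize()
# below then rescales the sum so the PSF integrates to unity.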
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
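# Fill the (offset, energy) grid point by point; entries where the
# containment computation fails numerically are set to NaN below.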
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
# Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment fraction as a function of energy.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495] deg.
exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
Array of position errors (rad) on which the PSF3D will be defined.
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
The PSF3D. It will be defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
|
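A minimal usage sketch for the class above (the file path and HDU name follow the docstring example; the chosen energy and offset values are illustrative assumptions):

from gammapy.irf import EnergyDependentMultiGaussPSF

filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')

# Evaluate the analytical model at a single energy and offset ...
gauss = psf.psf_at_energy_and_theta(energy='1 TeV', theta='0.5 deg')
print(gauss.containment_radius(0.68))

# ... or compute R68 on the full energy x offset grid
r68 = psf.containment_radius(psf.energy_axis_true.center, psf.offset_axis.center)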
build_model
|
Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary.
|
import time
import numpy as np
import os.path as osp
import datetime
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import nni
from dassl.data import DataManager
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import (
MetricMeter, AverageMeter, tolist_if_not, count_num_param, load_checkpoint,
save_checkpoint, resume_from_checkpoint, load_pretrained_weights
)
from dassl.modeling import build_head, build_backbone
from dassl.evaluation import build_evaluator
class SimpleNet(nn.Module):
"""A simple neural network composed of a CNN backbone
and optionally a head such as an MLP for classification.
"""
def __init__(self, cfg, model_cfg, num_classes, **kwargs):
super().__init__()
self.backbone = build_backbone(
model_cfg.BACKBONE.NAME,
verbose=cfg.VERBOSE,
pretrained=model_cfg.BACKBONE.PRETRAINED,
**kwargs
)
fdim = self.backbone.out_features
print("------------------------fdim:", fdim)
self.head = None
if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
self.head = build_head(
model_cfg.HEAD.NAME,
verbose=cfg.VERBOSE,
in_features=fdim,
hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
activation=model_cfg.HEAD.ACTIVATION,
bn=model_cfg.HEAD.BN,
dropout=model_cfg.HEAD.DROPOUT,
**kwargs
)
fdim = self.head.out_features
self.classifier = None
if num_classes > 0:
self.classifier = nn.Linear(fdim, num_classes)
self._fdim = fdim
@property
def fdim(self):
return self._fdim
def forward(self, x, return_feature=False):
f = self.backbone(x)
if self.head is not None:
f = self.head(f)
if self.classifier is None:
return f
y = self.classifier(f)
if return_feature:
return y, f
return y
class TrainerBase:
"""Base class for iterative trainer."""
def __init__(self):
self._models = OrderedDict()
self._optims = OrderedDict()
self._scheds = OrderedDict()
self._writer = None
def register_model(self, name='model', model=None, optim=None, sched=None):
if self.__dict__.get('_models') is None:
raise AttributeError(
'Cannot assign model before super().__init__() call'
)
if self.__dict__.get('_optims') is None:
raise AttributeError(
'Cannot assign optim before super().__init__() call'
)
if self.__dict__.get('_scheds') is None:
raise AttributeError(
'Cannot assign sched before super().__init__() call'
)
assert name not in self._models, 'Found duplicate model names'
self._models[name] = model
self._optims[name] = optim
self._scheds[name] = sched
def get_model_names(self, names=None):
names_real = list(self._models.keys())
if names is not None:
names = tolist_if_not(names)
for name in names:
assert name in names_real
return names
else:
return names_real
def save_model(self, epoch, directory, is_best=False, model_name=''):
names = self.get_model_names()
for name in names:
model_dict = self._models[name].state_dict()
optim_dict = None
if self._optims[name] is not None:
optim_dict = self._optims[name].state_dict()
sched_dict = None
if self._scheds[name] is not None:
sched_dict = self._scheds[name].state_dict()
save_checkpoint(
{
'state_dict': model_dict,
'epoch': epoch + 1,
'optimizer': optim_dict,
'scheduler': sched_dict
},
osp.join(directory, name),
is_best=is_best,
model_name=model_name
)
def resume_model_if_exist(self, directory):
names = self.get_model_names()
file_missing = False
for name in names:
path = osp.join(directory, name)
if not osp.exists(path):
file_missing = True
break
if file_missing:
print('No checkpoint found, train from scratch')
return 0
print(
'Found checkpoint in "{}". Will resume training'.format(directory)
)
for name in names:
path = osp.join(directory, name)
start_epoch = resume_from_checkpoint(
path, self._models[name], self._optims[name],
self._scheds[name]
)
return start_epoch
def load_model(self, directory, epoch=None):
if not directory:
print(
'Note that load_model() is skipped as no pretrained model is given'
)
return
names = self.get_model_names()
# By default, the best model is loaded
model_file = 'model-best.pth.tar'
if epoch is not None:
model_file = 'model.pth.tar-' + str(epoch)
for name in names:
model_path = osp.join(directory, name, model_file)
if not osp.exists(model_path):
raise FileNotFoundError(
'Model not found at "{}"'.format(model_path)
)
checkpoint = load_checkpoint(model_path)
state_dict = checkpoint['state_dict']
epoch = checkpoint['epoch']
print(
'Loading weights to {} '
'from "{}" (epoch = {})'.format(name, model_path, epoch)
)
self._models[name].load_state_dict(state_dict)
def set_model_mode(self, mode='train', names=None):
names = self.get_model_names(names)
for name in names:
if mode == 'train':
self._models[name].train()
else:
self._models[name].eval()
def update_lr(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._scheds[name] is not None:
self._scheds[name].step()
def detect_anomaly(self, loss):
if not torch.isfinite(loss).all():
raise FloatingPointError('Loss is infinite or NaN!')
def init_writer(self, log_dir):
if self.__dict__.get('_writer') is None or self._writer is None:
print(
'Initializing summary writer for tensorboard '
'with log_dir={}'.format(log_dir)
)
self._writer = SummaryWriter(log_dir=log_dir)
def close_writer(self):
if self._writer is not None:
self._writer.close()
def write_scalar(self, tag, scalar_value, global_step=None):
if self._writer is None:
# Do nothing if writer is not initialized
# Note that writer is only used when training is needed
pass
else:
self._writer.add_scalar(tag, scalar_value, global_step)
def train(self, start_epoch, max_epoch):
"""Generic training loops."""
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train()
def before_train(self):
pass
def after_train(self):
pass
def before_epoch(self):
pass
def after_epoch(self):
pass
def run_epoch(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def parse_batch_train(self, batch):
raise NotImplementedError
def parse_batch_test(self, batch):
raise NotImplementedError
def forward_backward(self, batch):
raise NotImplementedError
def model_inference(self, input):
raise NotImplementedError
def model_zero_grad(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
self._optims[name].zero_grad()
def model_backward(self, loss):
self.detect_anomaly(loss)
if not self.use_amp:
loss.backward()
else:
self.scaler.scale(loss).backward()
def model_update(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
if not self.use_amp:
self._optims[name].step()
else:
self.scaler.step(self._optims[name])
def model_backward_and_update(self, loss, names=None):
self.model_zero_grad(names)
self.model_backward(loss)
self.model_update(names)
if self.use_amp:
self.scaler.update()
class SimpleTrainer(TrainerBase):
"""A simple trainer class implementing generic functions."""
def __init__(self, cfg):
super().__init__()
self.check_cfg(cfg)
if torch.cuda.is_available() and cfg.USE_CUDA:
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# use amp to accelerate training
self.use_amp = cfg.TRAIN.USE_AMP
if self.use_amp:
self.scaler = torch.cuda.amp.GradScaler()
# Save as attributes some frequently used variables
self.start_epoch = self.epoch = 0
self.max_epoch = cfg.OPTIM.MAX_EPOCH
self.output_dir = cfg.OUTPUT_DIR
self.cfg = cfg
self.build_data_loader()
self.build_model()
self.evaluator = build_evaluator(cfg, lab2cname=self.dm.lab2cname)
# zhaoxin modify
self.best_val_acc = -np.inf
self.best_test_acc = -np.inf
self.best_val_test_acc = 0
self.best_val_epoch = 0
self.best_test_epoch = 0
def check_cfg(self, cfg):
"""Check whether some variables are set correctly for
the trainer (optional).
For example, a trainer might require a particular sampler
for training such as 'RandomDomainSampler', so it is good
to do the checking:
assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'
"""
pass
def build_data_loader(self):
"""Create essential data-related attributes.
What must be done in the re-implementation
of this method:
1) initialize data manager
2) assign as attributes the data loaders
3) assign as attribute the number of classes
"""
self.dm = DataManager(self.cfg)
self.train_loader_x = self.dm.train_loader_x
self.train_loader_u = self.dm.train_loader_u
self.val_loader = self.dm.val_loader
self.test_loader = self.dm.test_loader
self.num_classes = self.dm.num_classes
# MASKED: build_model function (lines 378-398)
def train(self):
super().train(self.start_epoch, self.max_epoch)
def before_train(self):
# directory = self.cfg.OUTPUT_DIR
if self.cfg.RESUME:
directory = self.cfg.RESUME
self.start_epoch = self.resume_model_if_exist(directory)
# Initialize summary writer
self.init_writer(self.output_dir)
# Remember the starting time (for computing the elapsed time)
self.time_start = time.time()
def after_train(self):
print('Finished training')
do_test = not self.cfg.TEST.NO_TEST
if do_test and not self.cfg.NNI:
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print('Deploy the model with the best val performance')
self.load_model(self.output_dir)
# zhaoxin modify
if self.cfg.TEST.PER_CLASS_RESULT:
self.best_val_test_acc, per_class_accs = self.test(return_per_class_results=True)
perclass_path = osp.join(self.output_dir, 'perclass_result.txt')
with open(perclass_path, 'w') as f:
for acc in per_class_accs:
f.write("{:6f}\n".format(acc))
else:
self.best_val_test_acc = self.test()
# zhaoxin add
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print(
'best_val_acc: {}\nbest_val_epoch: {}\nbest_val_test_acc: {}'.
format(
self.best_val_acc, self.best_val_epoch,
self.best_val_test_acc
)
)
if self.cfg.TEST.TEST_EVERY_EPOCH:
print(
'best_test_acc: {}\nbest_test_epoch: {}'.format(
self.best_test_acc, self.best_test_epoch
)
)
result_path = osp.join(self.output_dir, 'result.txt')
with open(result_path, 'w') as f:
f.write("{:6f}\n".format(self.best_val_test_acc))
if self.cfg.NNI:
nni.report_final_result(self.best_val_acc)
# Show elapsed time
elapsed = round(time.time() - self.time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed: {}'.format(elapsed))
# Close writer
self.close_writer()
def after_epoch(self):
last_epoch = (self.epoch + 1) == self.max_epoch
do_test = not self.cfg.TEST.NO_TEST
meet_checkpoint_freq = (
self.epoch + 1
) % self.cfg.TRAIN.CHECKPOINT_FREQ == 0 if self.cfg.TRAIN.CHECKPOINT_FREQ > 0 else False
# zhaoxin modify
if do_test and self.cfg.TEST.FINAL_MODEL == 'best_val':
curr_val_acc = self.test(split='val')
# nni: report intermediate result
if self.cfg.NNI:
nni.report_intermediate_result(curr_val_acc)
is_best = curr_val_acc > self.best_val_acc
if is_best:
self.best_val_acc = curr_val_acc
self.best_val_epoch = self.epoch + 1
self.save_model(
self.epoch,
self.output_dir,
model_name='model-best.pth.tar'
)
if do_test and self.cfg.TEST.TEST_EVERY_EPOCH:
curr_test_acc = self.test(split='test')
if curr_test_acc > self.best_test_acc:
self.best_test_acc = curr_test_acc
self.best_test_epoch = self.epoch + 1
# if self.cfg.TEST.FINAL_MODEL == 'best_val':
# if is_best:
# self.best_val_test_acc = curr_test_acc
if meet_checkpoint_freq or last_epoch:
self.save_model(self.epoch, self.output_dir)
@torch.no_grad()
def test(self, split=None, return_per_class_results=False):
"""A generic testing pipeline."""
self.set_model_mode('eval')
self.evaluator.reset()
if split is None:
split = self.cfg.TEST.SPLIT
if split == 'val' and self.val_loader is not None:
data_loader = self.val_loader
print('Do evaluation on {} set'.format(split))
else:
data_loader = self.test_loader
print('Do evaluation on test set')
for batch_idx, batch in enumerate(data_loader):
input, label = self.parse_batch_test(batch)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
for k, v in results.items():
if k == 'perclass_accuracies':
continue
tag = '{}/{}'.format(split, k)
self.write_scalar(tag, v, self.epoch)
if not return_per_class_results:
return list(results.values())[0]
else:
return results['accuracy'], results['perclass_accuracies']
def model_inference(self, input):
return self.model(input)
def parse_batch_test(self, batch):
input = batch['img']
label = batch['label']
input = input.to(self.device)
label = label.to(self.device)
return input, label
def get_current_lr(self, names=None):
names = self.get_model_names(names)
name = names[0]
return self._optims[name].param_groups[0]['lr']
class TrainerXU(SimpleTrainer):
"""A base trainer using both labeled and unlabeled data.
In the context of domain adaptation, labeled and unlabeled data
come from source and target domains respectively.
When it comes to semi-supervised learning, all data comes from the
same domain.
"""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
# Decide to iterate over labeled or unlabeled dataset
len_train_loader_x = len(self.train_loader_x)
len_train_loader_u = len(self.train_loader_u)
if self.cfg.TRAIN.COUNT_ITER == 'train_x':
self.num_batches = len_train_loader_x
elif self.cfg.TRAIN.COUNT_ITER == 'train_u':
self.num_batches = len_train_loader_u
elif self.cfg.TRAIN.COUNT_ITER == 'smaller_one':
self.num_batches = min(len_train_loader_x, len_train_loader_u)
else:
raise ValueError
train_loader_x_iter = iter(self.train_loader_x)
train_loader_u_iter = iter(self.train_loader_u)
end = time.time()
for self.batch_idx in range(self.num_batches):
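# Restart whichever loader is exhausted first so that a labeled and an
# unlabeled batch are available at every iteration of the epoch.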
try:
batch_x = next(train_loader_x_iter)
except StopIteration:
train_loader_x_iter = iter(self.train_loader_x)
batch_x = next(train_loader_x_iter)
try:
batch_u = next(train_loader_u_iter)
except StopIteration:
train_loader_u_iter = iter(self.train_loader_u)
batch_u = next(train_loader_u_iter)
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch_x, batch_u)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
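# ETA = average batch time x (batches left in this epoch + batches in
# all remaining epochs)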
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch_x, batch_u):
input_x = batch_x['img']
label_x = batch_x['label']
input_u = batch_u['img']
input_x = input_x.to(self.device)
label_x = label_x.to(self.device)
input_u = input_u.to(self.device)
return input_x, label_x, input_u
class TrainerX(SimpleTrainer):
"""A base trainer using labeled data only."""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
self.num_batches = len(self.train_loader_x)
end = time.time()
for self.batch_idx, batch in enumerate(self.train_loader_x):
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch):
input = batch['img']
label = batch['label']
domain = batch['domain']
input = input.to(self.device)
label = label.to(self.device)
domain = domain.to(self.device)
return input, label, domain
|
def build_model(self):
"""Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary.
"""
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
# for name, module in self.model.named_children():
# print(name)
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
print('# params: {:,}'.format(count_num_param(self.model)))
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
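# Register model, optimizer and scheduler under the name 'model' so the
# generic TrainerBase machinery (saving, resuming, lr stepping) sees them.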
self.register_model('model', self.model, self.optim, self.sched)
| 378
| 398
|
import time
import numpy as np
import os.path as osp
import datetime
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import nni
from dassl.data import DataManager
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import (
MetricMeter, AverageMeter, tolist_if_not, count_num_param, load_checkpoint,
save_checkpoint, resume_from_checkpoint, load_pretrained_weights
)
from dassl.modeling import build_head, build_backbone
from dassl.evaluation import build_evaluator
class SimpleNet(nn.Module):
"""A simple neural network composed of a CNN backbone
and optionally a head such as an MLP for classification.
"""
def __init__(self, cfg, model_cfg, num_classes, **kwargs):
super().__init__()
self.backbone = build_backbone(
model_cfg.BACKBONE.NAME,
verbose=cfg.VERBOSE,
pretrained=model_cfg.BACKBONE.PRETRAINED,
**kwargs
)
fdim = self.backbone.out_features
print("------------------------fdim:", fdim)
self.head = None
if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
self.head = build_head(
model_cfg.HEAD.NAME,
verbose=cfg.VERBOSE,
in_features=fdim,
hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
activation=model_cfg.HEAD.ACTIVATION,
bn=model_cfg.HEAD.BN,
dropout=model_cfg.HEAD.DROPOUT,
**kwargs
)
fdim = self.head.out_features
self.classifier = None
if num_classes > 0:
self.classifier = nn.Linear(fdim, num_classes)
self._fdim = fdim
@property
def fdim(self):
return self._fdim
def forward(self, x, return_feature=False):
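# Returns backbone/head features when no classifier is attached;
# otherwise logits, optionally together with the penultimate features.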
f = self.backbone(x)
if self.head is not None:
f = self.head(f)
if self.classifier is None:
return f
y = self.classifier(f)
if return_feature:
return y, f
return y
class TrainerBase:
"""Base class for iterative trainer."""
def __init__(self):
self._models = OrderedDict()
self._optims = OrderedDict()
self._scheds = OrderedDict()
self._writer = None
def register_model(self, name='model', model=None, optim=None, sched=None):
if self.__dict__.get('_models') is None:
raise AttributeError(
'Cannot assign model before super().__init__() call'
)
if self.__dict__.get('_optims') is None:
raise AttributeError(
'Cannot assign optim before super().__init__() call'
)
if self.__dict__.get('_scheds') is None:
raise AttributeError(
'Cannot assign sched before super().__init__() call'
)
assert name not in self._models, 'Found duplicate model names'
self._models[name] = model
self._optims[name] = optim
self._scheds[name] = sched
def get_model_names(self, names=None):
names_real = list(self._models.keys())
if names is not None:
names = tolist_if_not(names)
for name in names:
assert name in names_real
return names
else:
return names_real
def save_model(self, epoch, directory, is_best=False, model_name=''):
names = self.get_model_names()
for name in names:
model_dict = self._models[name].state_dict()
optim_dict = None
if self._optims[name] is not None:
optim_dict = self._optims[name].state_dict()
sched_dict = None
if self._scheds[name] is not None:
sched_dict = self._scheds[name].state_dict()
save_checkpoint(
{
'state_dict': model_dict,
'epoch': epoch + 1,
'optimizer': optim_dict,
'scheduler': sched_dict
},
osp.join(directory, name),
is_best=is_best,
model_name=model_name
)
def resume_model_if_exist(self, directory):
names = self.get_model_names()
file_missing = False
for name in names:
path = osp.join(directory, name)
if not osp.exists(path):
file_missing = True
break
if file_missing:
print('No checkpoint found, train from scratch')
return 0
print(
'Found checkpoint in "{}". Will resume training'.format(directory)
)
for name in names:
path = osp.join(directory, name)
start_epoch = resume_from_checkpoint(
path, self._models[name], self._optims[name],
self._scheds[name]
)
return start_epoch
def load_model(self, directory, epoch=None):
if not directory:
print(
'Note that load_model() is skipped as no pretrained model is given'
)
return
names = self.get_model_names()
# By default, the best model is loaded
model_file = 'model-best.pth.tar'
if epoch is not None:
model_file = 'model.pth.tar-' + str(epoch)
for name in names:
model_path = osp.join(directory, name, model_file)
if not osp.exists(model_path):
raise FileNotFoundError(
'Model not found at "{}"'.format(model_path)
)
checkpoint = load_checkpoint(model_path)
state_dict = checkpoint['state_dict']
epoch = checkpoint['epoch']
print(
'Loading weights to {} '
'from "{}" (epoch = {})'.format(name, model_path, epoch)
)
self._models[name].load_state_dict(state_dict)
def set_model_mode(self, mode='train', names=None):
names = self.get_model_names(names)
for name in names:
if mode == 'train':
self._models[name].train()
else:
self._models[name].eval()
def update_lr(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._scheds[name] is not None:
self._scheds[name].step()
def detect_anomaly(self, loss):
if not torch.isfinite(loss).all():
raise FloatingPointError('Loss is infinite or NaN!')
def init_writer(self, log_dir):
if self.__dict__.get('_writer') is None or self._writer is None:
print(
'Initializing summary writer for tensorboard '
'with log_dir={}'.format(log_dir)
)
self._writer = SummaryWriter(log_dir=log_dir)
def close_writer(self):
if self._writer is not None:
self._writer.close()
def write_scalar(self, tag, scalar_value, global_step=None):
if self._writer is None:
# Do nothing if writer is not initialized
# Note that writer is only used when training is needed
pass
else:
self._writer.add_scalar(tag, scalar_value, global_step)
def train(self, start_epoch, max_epoch):
"""Generic training loops."""
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train()
def before_train(self):
pass
def after_train(self):
pass
def before_epoch(self):
pass
def after_epoch(self):
pass
def run_epoch(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def parse_batch_train(self, batch):
raise NotImplementedError
def parse_batch_test(self, batch):
raise NotImplementedError
def forward_backward(self, batch):
raise NotImplementedError
def model_inference(self, input):
raise NotImplementedError
def model_zero_grad(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
self._optims[name].zero_grad()
def model_backward(self, loss):
self.detect_anomaly(loss)
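# Under AMP the loss is scaled before backward() to avoid fp16 gradient
# underflow; scaler.step() in model_update and scaler.update() in
# model_backward_and_update complete the cycle.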
if not self.use_amp:
loss.backward()
else:
self.scaler.scale(loss).backward()
def model_update(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
if not self.use_amp:
self._optims[name].step()
else:
self.scaler.step(self._optims[name])
def model_backward_and_update(self, loss, names=None):
self.model_zero_grad(names)
self.model_backward(loss)
self.model_update(names)
if self.use_amp:
self.scaler.update()
class SimpleTrainer(TrainerBase):
"""A simple trainer class implementing generic functions."""
def __init__(self, cfg):
super().__init__()
self.check_cfg(cfg)
if torch.cuda.is_available() and cfg.USE_CUDA:
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# use amp to accelerate training
self.use_amp = cfg.TRAIN.USE_AMP
if self.use_amp:
self.scaler = torch.cuda.amp.GradScaler()
# Save as attributes some frequently used variables
self.start_epoch = self.epoch = 0
self.max_epoch = cfg.OPTIM.MAX_EPOCH
self.output_dir = cfg.OUTPUT_DIR
self.cfg = cfg
self.build_data_loader()
self.build_model()
self.evaluator = build_evaluator(cfg, lab2cname=self.dm.lab2cname)
# zhaoxin modify
self.best_val_acc = -np.inf
self.best_test_acc = -np.inf
self.best_val_test_acc = 0
self.best_val_epoch = 0
self.best_test_epoch = 0
def check_cfg(self, cfg):
"""Check whether some variables are set correctly for
the trainer (optional).
For example, a trainer might require a particular sampler
for training such as 'RandomDomainSampler', so it is good
to do the checking:
assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'
"""
pass
def build_data_loader(self):
"""Create essential data-related attributes.
What must be done in the re-implementation
of this method:
1) initialize data manager
2) assign as attributes the data loaders
3) assign as attribute the number of classes
"""
self.dm = DataManager(self.cfg)
self.train_loader_x = self.dm.train_loader_x
self.train_loader_u = self.dm.train_loader_u
self.val_loader = self.dm.val_loader
self.test_loader = self.dm.test_loader
self.num_classes = self.dm.num_classes
def build_model(self):
"""Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary.
"""
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
# for name, module in self.model.named_children():
# print(name)
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
print('# params: {:,}'.format(count_num_param(self.model)))
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model('model', self.model, self.optim, self.sched)
def train(self):
super().train(self.start_epoch, self.max_epoch)
def before_train(self):
# directory = self.cfg.OUTPUT_DIR
if self.cfg.RESUME:
directory = self.cfg.RESUME
self.start_epoch = self.resume_model_if_exist(directory)
# Initialize summary writer
self.init_writer(self.output_dir)
# Remember the starting time (for computing the elapsed time)
self.time_start = time.time()
def after_train(self):
print('Finished training')
do_test = not self.cfg.TEST.NO_TEST
if do_test and not self.cfg.NNI:
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print('Deploy the model with the best val performance')
self.load_model(self.output_dir)
# zhaoxin modify
if self.cfg.TEST.PER_CLASS_RESULT:
self.best_val_test_acc, per_class_accs = self.test(return_per_class_results=True)
perclass_path = osp.join(self.output_dir, 'perclass_result.txt')
with open(perclass_path, 'w') as f:
for acc in per_class_accs:
f.write("{:6f}\n".format(acc))
else:
self.best_val_test_acc = self.test()
# zhaoxin add
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print(
'best_val_acc: {}\nbest_val_epoch: {}\nbest_val_test_acc: {}'.
format(
self.best_val_acc, self.best_val_epoch,
self.best_val_test_acc
)
)
if self.cfg.TEST.TEST_EVERY_EPOCH:
print(
'best_test_acc: {}\nbest_test_epoch: {}'.format(
self.best_test_acc, self.best_test_epoch
)
)
result_path = osp.join(self.output_dir, 'result.txt')
with open(result_path, 'w') as f:
f.write("{:6f}\n".format(self.best_val_test_acc))
if self.cfg.NNI:
nni.report_final_result(self.best_val_acc)
# Show elapsed time
elapsed = round(time.time() - self.time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed: {}'.format(elapsed))
# Close writer
self.close_writer()
def after_epoch(self):
last_epoch = (self.epoch + 1) == self.max_epoch
do_test = not self.cfg.TEST.NO_TEST
        checkpoint_freq = self.cfg.TRAIN.CHECKPOINT_FREQ
        meet_checkpoint_freq = (
            (self.epoch + 1) % checkpoint_freq == 0
            if checkpoint_freq > 0 else False
        )
# zhaoxin modify
if do_test and self.cfg.TEST.FINAL_MODEL == 'best_val':
curr_val_acc = self.test(split='val')
# nni: report intermediate result
if self.cfg.NNI:
nni.report_intermediate_result(curr_val_acc)
is_best = curr_val_acc > self.best_val_acc
if is_best:
self.best_val_acc = curr_val_acc
self.best_val_epoch = self.epoch + 1
self.save_model(
self.epoch,
self.output_dir,
model_name='model-best.pth.tar'
)
if do_test and self.cfg.TEST.TEST_EVERY_EPOCH:
curr_test_acc = self.test(split='test')
if curr_test_acc > self.best_test_acc:
self.best_test_acc = curr_test_acc
self.best_test_epoch = self.epoch + 1
# if self.cfg.TEST.FINAL_MODEL == 'best_val':
# if is_best:
# self.best_val_test_acc = curr_test_acc
if meet_checkpoint_freq or last_epoch:
self.save_model(self.epoch, self.output_dir)
@torch.no_grad()
def test(self, split=None, return_per_class_results=False):
"""A generic testing pipeline."""
self.set_model_mode('eval')
self.evaluator.reset()
if split is None:
split = self.cfg.TEST.SPLIT
if split == 'val' and self.val_loader is not None:
data_loader = self.val_loader
print('Do evaluation on {} set'.format(split))
else:
data_loader = self.test_loader
print('Do evaluation on test set')
for batch_idx, batch in enumerate(data_loader):
input, label = self.parse_batch_test(batch)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
for k, v in results.items():
if k == 'perclass_accuracies':
continue
tag = '{}/{}'.format(split, k)
self.write_scalar(tag, v, self.epoch)
if not return_per_class_results:
return list(results.values())[0]
else:
return results['accuracy'], results['perclass_accuracies']
def model_inference(self, input):
return self.model(input)
def parse_batch_test(self, batch):
input = batch['img']
label = batch['label']
input = input.to(self.device)
label = label.to(self.device)
return input, label
def get_current_lr(self, names=None):
names = self.get_model_names(names)
name = names[0]
return self._optims[name].param_groups[0]['lr']
class TrainerXU(SimpleTrainer):
"""A base trainer using both labeled and unlabeled data.
In the context of domain adaptation, labeled and unlabeled data
come from source and target domains respectively.
When it comes to semi-supervised learning, all data comes from the
same domain.
"""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
# Decide to iterate over labeled or unlabeled dataset
len_train_loader_x = len(self.train_loader_x)
len_train_loader_u = len(self.train_loader_u)
if self.cfg.TRAIN.COUNT_ITER == 'train_x':
self.num_batches = len_train_loader_x
elif self.cfg.TRAIN.COUNT_ITER == 'train_u':
self.num_batches = len_train_loader_u
elif self.cfg.TRAIN.COUNT_ITER == 'smaller_one':
self.num_batches = min(len_train_loader_x, len_train_loader_u)
        else:
            raise ValueError(
                'Unknown TRAIN.COUNT_ITER: {}'.format(
                    self.cfg.TRAIN.COUNT_ITER
                )
            )
train_loader_x_iter = iter(self.train_loader_x)
train_loader_u_iter = iter(self.train_loader_u)
end = time.time()
for self.batch_idx in range(self.num_batches):
try:
batch_x = next(train_loader_x_iter)
except StopIteration:
train_loader_x_iter = iter(self.train_loader_x)
batch_x = next(train_loader_x_iter)
try:
batch_u = next(train_loader_u_iter)
except StopIteration:
train_loader_u_iter = iter(self.train_loader_u)
batch_u = next(train_loader_u_iter)
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch_x, batch_u)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch_x, batch_u):
input_x = batch_x['img']
label_x = batch_x['label']
input_u = batch_u['img']
input_x = input_x.to(self.device)
label_x = label_x.to(self.device)
input_u = input_u.to(self.device)
return input_x, label_x, input_u
class TrainerX(SimpleTrainer):
"""A base trainer using labeled data only."""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
self.num_batches = len(self.train_loader_x)
end = time.time()
for self.batch_idx, batch in enumerate(self.train_loader_x):
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch):
input = batch['img']
label = batch['label']
domain = batch['domain']
input = input.to(self.device)
label = label.to(self.device)
domain = domain.to(self.device)
return input, label, domain
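# Illustrative sketch (an assumption, not part of the original file): a
# minimal concrete trainer built on TrainerX. Only forward_backward has to be
# supplied; the base classes drive the epoch loop, checkpointing, AMP scaling
# and logging. F.cross_entropy and the class name are illustrative choices.
import torch.nn.functional as F


class VanillaExampleTrainer(TrainerX):

    def forward_backward(self, batch):
        input, label, domain = self.parse_batch_train(batch)
        output = self.model(input)
        loss = F.cross_entropy(output, label)
        self.model_backward_and_update(loss)
        # the returned values are accumulated by MetricMeter in run_epoch
        return {'loss': loss.item()}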
|
layers
|
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
|
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
input_img = graph.get_tensor_by_name(vgg_input_tensor_name)
prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_img, prob, layer3_o, layer4_o, layer7_o
tests.test_load_vgg(load_vgg, tf)
# MASKED: layers function (lines 50-82)
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
    Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
# add loss function
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# training_op
training_operation = optimizer.minimize(cross_entropy_loss)
return logits, training_operation, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
# initialize global variables
sess.run(tf.global_variables_initializer())
    # iterate over all batches of images in each epoch
for epoch in range(epochs):
for (input_img, gt_img) in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img,
correct_label: gt_img,
keep_prob: 0.7,
learning_rate: 5e-04})
print("Loss of {} at epoch {}/{}".format(loss, epoch, epochs))
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576) # KITTI dataset uses 160x576 images
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
epochs = 20
batch_size = 5
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
correct_label = tf.placeholder(tf.int32)
learning_rate = tf.placeholder(tf.float32)
# TODO: Build NN using load_vgg, layers, and optimize function
input_img, keep_prob, layer3_o, layer4_o, layer7_o = load_vgg(sess, vgg_path)
layer_output = layers(layer3_o, layer4_o, layer7_o, num_classes)
logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_img,
correct_label, keep_prob, learning_rate)
# TODO: Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_img)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
|
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
# 1x1 convolution layer with road / not-road features only
conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# upscaling size/ add features
output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer4_out)
# upscaling size/ reduce features
output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer3_out)
# upscaling size/ reduce features to road OR not-road
output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8,8), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='nn_final_output')
return output
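# Illustrative shape check (assumptions: `import tensorflow as tf` as in the
# surrounding file, TF 1.x, and VGG16 feature maps for the 160x576 input used
# in run(): pool3 -> 20x72x256, pool4 -> 10x36x512, conv7 -> 5x18x4096).
# Each stride-2 transpose doubles the spatial size, so the skip additions line
# up, and the final stride-8 transpose restores the full resolution.
def _check_layers_shapes(num_classes=2):
    l3 = tf.placeholder(tf.float32, (None, 20, 72, 256))
    l4 = tf.placeholder(tf.float32, (None, 10, 36, 512))
    l7 = tf.placeholder(tf.float32, (None, 5, 18, 4096))
    out = layers(l3, l4, l7, num_classes)
    print(out.shape)  # expected: (?, 160, 576, num_classes)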
| 50
| 82
|
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
input_img = graph.get_tensor_by_name(vgg_input_tensor_name)
prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_img, prob, layer3_o, layer4_o, layer7_o
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
# 1x1 convolution layer with road / not-road features only
conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# upscaling size/ add features
output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer4_out)
# upscaling size/ reduce features
output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer3_out)
# upscaling size/ reduce features to road OR not-road
output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8,8), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='nn_final_output')
return output
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
    Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
# add loss function
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# training_op
training_operation = optimizer.minimize(cross_entropy_loss)
return logits, training_operation, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
# initialize global variables
sess.run(tf.global_variables_initializer())
    # iterate over all batches of images in each epoch
for epoch in range(epochs):
for (input_img, gt_img) in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img,
correct_label: gt_img,
keep_prob: 0.7,
learning_rate: 5e-04})
print("Loss of {} at epoch {}/{}".format(loss, epoch, epochs))
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576) # KITTI dataset uses 160x576 images
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
epochs = 20
batch_size = 5
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
correct_label = tf.placeholder(tf.int32)
learning_rate = tf.placeholder(tf.float32)
# TODO: Build NN using load_vgg, layers, and optimize function
input_img, keep_prob, layer3_o, layer4_o, layer7_o = load_vgg(sess, vgg_path)
layer_output = layers(layer3_o, layer4_o, layer7_o, num_classes)
logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_img,
correct_label, keep_prob, learning_rate)
# TODO: Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_img)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
|
train_nn
|
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
|
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
input_img = graph.get_tensor_by_name(vgg_input_tensor_name)
prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_img, prob, layer3_o, layer4_o, layer7_o
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
# 1x1 convolution layer with road / not-road features only
conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# upscaling size/ add features
output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer4_out)
# upscaling size/ reduce features
output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer3_out)
# upscaling size/ reduce features to road OR not-road
output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8,8), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='nn_final_output')
return output
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
    Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
# add loss function
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# training_op
training_operation = optimizer.minimize(cross_entropy_loss)
return logits, training_operation, cross_entropy_loss
tests.test_optimize(optimize)
# MASKED: train_nn function (lines 108-135)
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576) # KITTI dataset uses 160x576 images
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
epochs = 20
batch_size = 5
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
correct_label = tf.placeholder(tf.int32)
learning_rate = tf.placeholder(tf.float32)
# TODO: Build NN using load_vgg, layers, and optimize function
input_img, keep_prob, layer3_o, layer4_o, layer7_o = load_vgg(sess, vgg_path)
layer_output = layers(layer3_o, layer4_o, layer7_o, num_classes)
logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_img,
correct_label, keep_prob, learning_rate)
# TODO: Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_img)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
|
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
# initialize global variables
sess.run(tf.global_variables_initializer())
    # iterate over all batches of images in each epoch
for epoch in range(epochs):
for (input_img, gt_img) in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img,
correct_label: gt_img,
keep_prob: 0.7,
learning_rate: 5e-04})
print("Loss of {} at epoch {}/{}".format(loss, epoch, epochs))
| 108
| 135
|
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
input_img = graph.get_tensor_by_name(vgg_input_tensor_name)
prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_img, prob, layer3_o, layer4_o, layer7_o
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
# 1x1 convolution layer with road / not-road features only
conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# upscaling size/ add features
output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer4_out)
# upscaling size/ reduce features
output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer3_out)
# upscaling size/ reduce features to road OR not-road
output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8,8), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='nn_final_output')
return output
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
    Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
# add loss function
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# training_op
training_operation = optimizer.minimize(cross_entropy_loss)
return logits, training_operation, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
# initialize global variables
sess.run(tf.global_variables_initializer())
    # iterate over all batches of images in each epoch
for epoch in range(epochs):
for (input_img, gt_img) in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img,
correct_label: gt_img,
keep_prob: 0.7,
learning_rate: 5e-04})
print("Loss of {} at epoch {}/{}".format(loss, epoch, epochs))
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576) # KITTI dataset uses 160x576 images
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
epochs = 20
batch_size = 5
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
correct_label = tf.placeholder(tf.int32)
learning_rate = tf.placeholder(tf.float32)
# TODO: Build NN using load_vgg, layers, and optimize function
input_img, keep_prob, layer3_o, layer4_o, layer7_o = load_vgg(sess, vgg_path)
layer_output = layers(layer3_o, layer4_o, layer7_o, num_classes)
logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_img,
correct_label, keep_prob, learning_rate)
# TODO: Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_img)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
|
load_data
|
Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
# MASKED: load_data function (lines 30-60)
|
def load_data():
"""Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000:i * 10000, :, :, :],
y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
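# Minimal usage sketch (illustrative, assuming the load_data defined above):
# (x_train, y_train), (x_test, y_test) = load_data()
# x_train.shape == (50000, 32, 32, 3) when image_data_format is channels_last
# x_train = x_train.astype('float32') / 255.  # scale pixels to [0, 1]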
| 30
| 60
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
"""Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000:i * 10000, :, :, :],
y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
|
mgeo
|
Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
    Rectangle is created and rotated with center
    at zero, and then translated to the center position
    Accepted centers:
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
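# Illustrative usage sketch (assumes the rectangle() above; the size, center
# and angle below are arbitrary): rasterize a rotated rectangle into a
# boolean mask with PIL's ImageDraw.polygon.
def _rectangle_mask_example(shape=(200, 200)):
    img = Image.new('L', shape, 0)
    poly = rectangle((100, 100), 80, 40, angle=30)
    ImageDraw.Draw(img).polygon(poly, outline=1, fill=1)
    return np.array(img, dtype=bool)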
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
    except AttributeError:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
# MASKED: mgeo function (lines 119-140)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
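# e.g. (illustrative) llspace(1, 100, n=4, log=True)
# -> np.power(10, np.arange(0, 2.5, 0.5)) = [1., 3.16, 10., 31.62, 100.]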
def nametoradec(name):
'''
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
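# e.g. (illustrative) nametoradec(['123456.78+123456'])
# -> (array([188.7366]), array([12.5822])) in decimal degrees (approx.)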
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    except AttributeError:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
    (statistical) probability density function,
    normalized so that the integral is 1.
    The integral over a range is the
    probability that the value lies within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
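# Sanity note: with the normalization above, np.sum(pdf * np.diff(bins)) == 1,
# i.e. the discrete integral of the returned pdf over the bins is unity.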
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
    Plot versus bins.
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning.
    Plot versus bins.
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if (err is None) or (err is False):
return hist.astype(np.float), avg(x)
else:
return hist.astype(np.float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
        only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
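# Illustrative use (assumes the bootstrap above): bootstrap the uncertainty of
# a sample mean; with smooth=True each resampled point is also perturbed by
# its measurement error X_err.
def _bootstrap_mean_example(X, X_err, n_boot=1000):
    means = [np.mean(bootstrap(X, X_err, smooth=True)) for _ in range(n_boot)]
    return np.mean(means), np.std(means)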
def num_above(values, level):
return np.sum((values >= level) & np.isfinite(values), dtype=np.float)
def num_below(values, level):
return np.sum((values < level) & np.isfinite(values), dtype=np.float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
    call: surfd(X, Xmap, bins,
                Xerr=None, Xmaperr=None, scale=1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
        sfr[Ak < Ak0] = 0  # below threshold; alternatives: np.nan or kappa * (Ak0 ** beta)
return sfr
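# e.g. (illustrative) a thresholded power law with beta=2, kappa=1, Ak0=0.8:
# schmidt_law(np.array([0.5, 1.0, 2.0]), (2.0, 1.0, 0.8)) -> array([0., 1., 4.])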
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the Schmidt law
to binned x,log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
        #resid = np.abs(y - mod) # where we calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # never runs into this region
        c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
    sampler.run_mcmc(pos, int(nsteps))  # nsteps may arrive as a float (e.g. 1e4)
# Get input values
# x, y, yerr = sampler.args
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
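# --- Added hedged sketch: recovering a known power law with emcee_schmidt.
# Synthetic inputs; in the real pipeline x, y, yerr come from surfd() via
# fit() below. Assumes an emcee version exposing sampler.chain, as the
# wrapper above already does. Small nwalkers/nsteps keep the demo cheap.
def _example_emcee_schmidt():
    x = np.linspace(0.5, 3.0, 8)
    y = np.log(1.0 * x ** 2.0)         # true beta = 2, kappa = 1
    yerr = np.full_like(x, 0.1)        # log-space uncertainties
    sampler, med, std = emcee_schmidt(x, y, yerr, pos=(2.0, 1.0),
                                      pose=(0.5, 0.5), nwalkers=32,
                                      nsteps=500, burnin=100, verbose=False)
    return med, std                    # medians should land near (2, 1)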
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
    bins : bin edges for binning data (I know it's bad to bin)
    samp : values for your sample
    samperr : errors on the values for your sample
    maps : map of values from which you drew your sample
    mapserr : errors on the map values
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
    bins2 = shift_bins(bins, 0.5)
    x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
    pose = .1 * pos  # override: spread walkers by 10% of the initial position
    # This function only fits; it doesn't plot. Don't pass in an existing
    # emcee sampler -- a new one is created and returned.
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
    try:
        return sampler, x, y, yerr, theta, theta_std
    except NameError:  # theta is undefined when a sampler was passed in
        return sampler, x, y, yerr
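# --- Added hedged call sketch for fit(); every name here is illustrative.
# bins = np.linspace(0.1, 3., 12)                   # extinction bin edges
# sampler, x, y, yerr, theta, theta_std = fit(bins, yso_ak, yso_ak_err,
#                                             akmap.ravel(), akmaperr.ravel(),
#                                             scale=pixel_area,
#                                             nwalkers=100, nsteps=1e4)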
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
    try:
        mpl.style.use('john')  # author's personal style sheet, if installed
    except Exception:
        pass
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
        chain = sampler.chain
        dim = chain.shape[-1]  # works for both emcee 2.x and 3.x
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
    plt.plot(xln, percent(50), 'k')  # median model
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')  # central 98% band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # central 96% band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 1 sigma (68%) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
        chain = sampler.chain
        ndim = chain.shape[-1]  # works for both emcee 2.x and 3.x
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
    print("hi y'all")
|
def mgeo(arr, n=2):
'''
    Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
| 119
| 140
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special  # special is used by sigconf1d below
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
    Rectangle is created and rotated with center
    at zero, and then translated to the center position
    accepts centers:
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
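# --- Added sketch: rasterize the rotated rectangle into a boolean mask with
# PIL, the use case named in the docstring above. Sizes are arbitrary.
def _example_rectangle_mask():
    img = Image.new('L', (100, 100), 0)
    corners = rectangle((50., 50.), 40., 20., angle=30.)
    ImageDraw.Draw(img).polygon(corners, outline=1, fill=1)
    return np.asarray(img, dtype=bool)  # True inside the rotated rectangle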
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
    try:
        return arr.compressed()
    except AttributeError:
        return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
    Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
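# --- Added worked check: avg() returns arithmetic centers for linear bins
# and geometric centers for log-spaced bins.
def _example_avg():
    lin = avg(np.array([1., 2., 3.]))     # -> [1.5, 2.5]
    geo = avg(np.array([1., 10., 100.]))  # -> [10**0.5, 10**1.5]
    return lin, geo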
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
    llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
    get values evenly spaced in linear or log space
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
        if xmin == 0:
            raise ValueError("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
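# --- Added sketch: llspace() in its linear (dx) and log (dex) modes.
def _example_llspace():
    lin = llspace(0., 1., dx=0.25)    # [0., 0.25, 0.5, 0.75, 1.]
    log = llspace(1., 100., dex=0.5)  # [1., ~3.16, 10., ~31.6, 100.]
    return lin, log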
def nametoradec(name):
'''
    Convert names formatted as
    hhmmss.ss+ddmmss into decimal degrees
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
    if not isinstance(name, str):  # a list/array of names
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
    try:
        xp, yp = extwcs.all_world2pix(
            np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    except AttributeError:  # a FITS header was passed instead of a WCS
        xp, yp = WCS(extwcs).all_world2pix(
            np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    ext = []
    err = []
    for i in range(len(np.array(xp))):
        try:
            # round the (float) pixel coordinates, not the loop index
            ext.append(extmap[int(round(yp[i])), int(round(xp[i]))])
            if errmap is not None:
                err.append(errmap[int(round(yp[i])), int(round(xp[i]))])
        except IndexError:
            ext.append(np.nan)
            if errmap is not None:
                err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
    (statistical) probability density function,
    normalized so that the integral is 1.
    The integral over a range is the probability
    that the value lies within that range.
    Returns array of size len(bins)-1
    Plot versus the returned bin centers
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
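# --- Added worked check: pdf() integrates to 1 by construction.
def _example_pdf():
    edges = np.linspace(-4., 4., 41)
    p, centers = pdf(np.random.normal(0., 1., 10000), edges)
    return np.sum(p * np.diff(edges))  # == 1.0 up to float rounding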
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
    Plot versus the returned bin centers
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
    # empirical distribution: returns rank (0..N-1) versus sorted data;
    # divide y by len(data) for a normalized EDF. `pdf` is unused.
    y = np.arange(len(data), dtype=float)
    x = np.sort(data).astype(float)
    return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
    Returns array of size len(bins)
    Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
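# --- Added worked check: cdf() rises from 0 to 1 across the bin edges.
def _example_cdf():
    c, edges = cdf(np.random.uniform(0., 1., 1000), np.linspace(0., 1., 11))
    return c[0], c[-1]  # (0.0, 1.0); plot c versus edges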
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning
    Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass-weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
    if (err is None) or (err is False):
        return hist.astype(float), avg(x)
    else:
        return hist.astype(float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
    only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
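# --- Added sketch: plain versus smooth bootstrap. The smooth variant also
# perturbs each resampled point by its Gaussian error.
def _example_bootstrap():
    x = np.random.normal(10., 1., 50)
    xerr = np.full(50, 0.5)
    plain = bootstrap(x)                      # resample with replacement
    smooth = bootstrap(x, xerr, smooth=True)  # resample + Gaussian jitter
    return plain.mean(), smooth.mean()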
def num_above(values, level):
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
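# --- Added worked check: the estimator above is alpha = 1 + N/sum(ln(x/xmin))
# with error (alpha - 1)/sqrt(N). Draw from a true alpha = 2.5 power law by
# inverse-transform sampling and recover it.
def _example_alpha_ML():
    u = np.random.uniform(size=10000)
    x = (1. - u) ** (-1. / (2.5 - 1.))  # power-law draws with xmin = 1
    return alpha_ML(x, 1., np.inf)      # alpha comes back near 2.5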
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
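# (added note) sigconf1d(n) returns the lower/upper tail percentiles and the
# enclosed probability of an n-sigma Gaussian interval, e.g.
# sigconf1d(1) -> (~15.87, ~84.13, ~68.27). It needs scipy's `special`
# module, imported at the top of this file.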
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
call: surfd(X, map, bins,
xerr = None, merr = None, scale = 1.)
    calculates H(X)/H(M) = [Nx pdf(x) dx] / [Nm pdf(m) dm] with dm = dx,
    so the ratio is independent of whether the binning is in x or log(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
    # keep indices where x*y is nonzero and x, y (and err) are all finite
    a1 = set(np.nonzero(np.multiply(x, y))[0])
    finite = x + y if err is None else x + y + err
    a2 = set(np.where(np.isfinite(finite))[0])
    a = np.asarray(list(a1 & a2))
    y = np.log(y[a])
    x = np.log(x[a])
    if err is None:
        p, covar = np.polyfit(x, y, 1, cov=True)
        m, b = p
        me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
    else:
        # convert linear errors to log-space errors: d(ln y) = dy / y
        err = err[a] / np.exp(y)
        # np.polyfit weights multiply the residuals, so pass 1/sigma
        # (not 1/sigma**2); see the numpy.polyfit docs
        p, covar = np.polyfit(x, y, 1, w=1. / err, cov=True)
        m, b = p
        me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
        sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
        sfr[Ak < Ak0] = 0.  # zero (not NaN) below the extinction threshold Ak0
return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
    if init is None:
        init = (1., 1.)  # guard: init is dereferenced in the model defaults below
    @custom_model
    def model(x, beta=init[0], kappa=init[1]):
        return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the Schmidt
    law to binned x, log(y) data. Generally, it fits a normalization
    and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
        #resid = np.abs(y - mod) # where we calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # sampler never runs into this bound in practice
        c2 = 0 <= kappa  # sampler never runs into this bound in practice
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
    sampler.run_mcmc(pos, int(nsteps))  # nsteps may arrive as a float (e.g. 1e4)
# Get input values
# x, y, yerr = sampler.args
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
    bins : bin edges for binning data (I know it's bad to bin)
    samp : values for your sample
    samperr : errors on the values for your sample
    maps : map of values from which you drew your sample
    mapserr : errors on the map values
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
    bins2 = shift_bins(bins, 0.5)
    x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
    pose = .1 * pos  # override: spread walkers by 10% of the initial position
    # This function only fits; it doesn't plot. Don't pass in an existing
    # emcee sampler -- a new one is created and returned.
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
    try:
        return sampler, x, y, yerr, theta, theta_std
    except NameError:  # theta is undefined when a sampler was passed in
        return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
    try:
        mpl.style.use('john')  # author's personal style sheet, if installed
    except Exception:
        pass
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
        chain = sampler.chain
        dim = chain.shape[-1]  # works for both emcee 2.x and 3.x
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
    plt.plot(xln, percent(50), 'k')  # median model
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')  # central 98% band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # central 96% band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 1 sigma (68%) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
        chain = sampler.chain
        ndim = chain.shape[-1]  # works for both emcee 2.x and 3.x
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
    print("hi y'all")
|
pdf
|
** Normalized differential area function. **
(statistical) probability density function,
normalized so that the integral is 1.
The integral over a range is the probability
that the value lies within that range.
Returns array of size len(bins)-1
Plot versus the returned bin centers
|
|
def pdf(values, bins):
'''
** Normalized differential area function. **
    (statistical) probability density function,
    normalized so that the integral is 1.
    The integral over a range is the probability
    that the value lies within that range.
    Returns array of size len(bins)-1
    Plot versus the returned bin centers
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
| 272
| 294
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectagle is created and rotated with center
at zero, and then translated to center position
accepters centers
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
Returns array of lenth len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
print('Error: Defaulting to 10 linears steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
'''
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    except AttributeError:  # extwcs is a header, not a WCS object
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
            ext.append(extmap[int(round(yp[i])), int(round(xp[i]))])
            if errmap is not None:
                err.append(errmap[int(round(yp[i])), int(round(xp[i]))])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
    ** Normalized differential area function. **
    (statistical) probability density function
    normalized so that the integral is 1.
    The integral over a range is the
    probability that the value is within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
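# Illustrative check (not part of the original module; data are synthetic):
# pdf() should integrate to ~1 over the binned range.
def _demo_pdf():
    vals = np.random.normal(0., 1., 10000)
    edges = np.linspace(-5., 5., 51)
    p, centers = pdf(vals, edges)
    total = np.sum(p * np.diff(edges))  # ~1.0
    return p, centers, total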
def pdf2(values, bins):
'''
    Like the PDF, but normalized so that
    the integral equals the total amount
    of a quantity. The integral over a
    range is the total amount within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
    Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
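# Illustrative usage sketch (synthetic data, hypothetical bin choice):
# frac[k] is the fraction of samples below edges[k], so frac[0] == 0
# and frac[-1] == 1 when the bins span the data.
def _demo_cdf():
    vals = np.random.uniform(0., 1., 1000)
    frac, edges = cdf(vals, np.linspace(0., 1., 11))
    return frac, edges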
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning
    Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass-weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
    if (err is None) or (err is False):
        return hist.astype(float), avg(x)
    else:
        return hist.astype(float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
        only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
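# Illustrative usage sketch (synthetic data): plain resampling versus the
# smooth bootstrap, which perturbs each resampled point by its error.
def _demo_bootstrap():
    X = np.random.normal(10., 2., 500)
    X_err = np.full_like(X, 0.5)
    plain = bootstrap(X)
    smoothed = bootstrap(X, X_err, smooth=True)
    return plain, smoothed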
def num_above(values, level):
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
    return alpha, error, loglike, xmin, xmax
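# Illustrative check (synthetic data): draw from p(x) ~ x**-2.5 above
# xmin = 1 by inverse-transform sampling; alpha_ML should recover ~2.5.
def _demo_alpha_ML():
    u = np.random.uniform(0., 1., 5000)
    data = (1. - u) ** (-1. / 1.5)
    a, a_err, loglike, lo, hi = alpha_ML(data, 1., np.inf)
    return a, a_err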
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
    call: surfd(X, Xmap, bins,
                Xerr=None, Xmaperr=None, scale=1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
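# Illustrative usage sketch (hypothetical parameters): the 2-parameter
# form is a pure power law; the 3-parameter form zeroes the SFR below Ak0.
def _demo_schmidt_law():
    Ak = np.linspace(0.1, 2., 20)
    plain = schmidt_law(Ak, (2., 1.))         # kappa * Ak**beta
    thresh = schmidt_law(Ak, (2., 1., 0.8))   # 0 below Ak = 0.8
    return plain, thresh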
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the schmidt law
to binned x,log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
        #resid = np.abs(y - mod) # where we calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # never runs into this region
        c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
    samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
    bins2 = shift_bins(bins, 0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
    pose = .1 * pos  # np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
    # an emcee sampler type; it will spit it back out
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
    try:
        return sampler, x, y, yerr, theta, theta_std
    except NameError:
        # theta/theta_std are undefined when a sampler was passed in
        return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
    try:
        mpl.style.use('john')
    except OSError:
        pass  # custom style not installed
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
    plt.plot(xln, percent(50), 'k')  # median model
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')  # ~2.6 sigma band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # ~2 sigma band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 1 sigma band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
|
pdf2
|
Like the PDF, but normalized so that
the integral equals the total amount
of a quantity. The integral over a
range is the total amount within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
    Rectangle is created and rotated with center
    at zero, and then translated to center position
    accepted centers:
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
    except AttributeError:  # not a MaskedArray
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
    Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
'''
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
    if not isinstance(name, str):  # a list/array of names
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    except AttributeError:  # extwcs is a header, not a WCS object
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
            ext.append(extmap[int(round(yp[i])), int(round(xp[i]))])
            if errmap is not None:
                err.append(errmap[int(round(yp[i])), int(round(xp[i]))])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
    ** Normalized differential area function. **
    (statistical) probability density function
    normalized so that the integral is 1.
    The integral over a range is the
    probability that the value is within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
# MASKED: pdf2 function (lines 297-315)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
    Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning
    Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass-weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
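# Illustrative usage sketch (synthetic extinction values, default
# aktomassd calibration): the mass above each extinction level.
def _demo_mass_function():
    ak = np.random.lognormal(-1., 0.5, 2000)
    m_above, edges = mass_function(ak, np.linspace(0.1, 2., 20))
    return m_above, edges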
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
    if (err is None) or (err is False):
        return hist.astype(float), avg(x)
    else:
        return hist.astype(float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
        only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
def num_above(values, level):
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
    return alpha, error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
    call: surfd(X, Xmap, bins,
                Xerr=None, Xmaperr=None, scale=1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
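# Illustrative usage sketch (synthetic data): a sample drawn from a fake
# map, so the surface-density ratio should be roughly flat in Ak.
def _demo_surfd():
    amap = np.random.lognormal(-1., 0.5, 5000)
    stars = np.random.choice(amap, 200)
    edges = np.linspace(0.05, 2., 15)
    sd, sd_err = surfd(stars, amap, edges, scale=1., return_err=True)
    return sd, sd_err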
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
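# Illustrative check (synthetic data): a noisy power law y = 3 * x**2;
# the recovered slope should be ~2 and the normalization ~3.
def _demo_alpha():
    x = np.linspace(1., 10., 30)
    y = 3. * x ** 2. * np.exp(np.random.normal(0., 0.05, 30))
    m, kappa, m_err, b_err = alpha(y, x, return_kappa=True, cov=True)
    return m, kappa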
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the schmidt law
to binned x,log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
        #resid = np.abs(y - mod) # where we calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # never runs into this region
        c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
    samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
    bins2 = shift_bins(bins, 0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
    pose = .1 * pos  # np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
    # an emcee sampler type; it will spit it back out
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
    try:
        return sampler, x, y, yerr, theta, theta_std
    except NameError:
        # theta/theta_std are undefined when a sampler was passed in
        return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
    try:
        mpl.style.use('john')
    except OSError:
        pass  # custom style not installed
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
    plt.plot(xln, percent(50), 'k')  # median model
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')  # ~2.6 sigma band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # ~2 sigma band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 1 sigma band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
|
def pdf2(values, bins):
'''
    Like the PDF, but normalized so that
    the integral equals the total amount
    of a quantity. The integral over a
    range is the total amount within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
| 297
| 315
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
    Rectangle is created and rotated with center
    at zero, and then translated to center position
    accepted centers:
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
    except AttributeError:  # not a MaskedArray
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
    Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
'''
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
    if not isinstance(name, str):  # a list/array of names
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    except AttributeError:  # extwcs is a header, not a WCS object
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
            ext.append(extmap[int(round(yp[i])), int(round(xp[i]))])
            if errmap is not None:
                err.append(errmap[int(round(yp[i])), int(round(xp[i]))])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
    ** Normalized differential area function. **
    (statistical) probability density function
    normalized so that the integral is 1.
    The integral over a range is the
    probability that the value is within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
def pdf2(values, bins):
'''
    Like the PDF, but normalized so that
    the integral equals the total amount
    of a quantity. The integral over a
    range is the total amount within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
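# Illustrative check (synthetic data): pdf() integrates to ~1, while
# pdf2() integrates to the total number of samples in the binned range.
def _demo_pdf_vs_pdf2():
    vals = np.random.exponential(1., 3000)
    edges = np.linspace(0., 5., 26)
    p, centers = pdf(vals, edges)
    q, centers = pdf2(vals, edges)
    return np.sum(p * np.diff(edges)), np.sum(q * np.diff(edges))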
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
    Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning
    Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
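# Illustrative usage sketch (synthetic extinction values): the chain from
# cumulative area above a level to its linear and logarithmic slopes.
def _demo_area_functions():
    amap = np.random.lognormal(-1., 0.5, 4000)
    edges = np.linspace(0.05, 2., 25)
    above, e = area_function(amap, edges)
    slope, centers = diff_area_function(amap, edges)
    logslope, logcenters = log_diff_area_function(amap, edges)
    return above, slope, logslope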
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass-weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
    if (err is None) or (err is False):
        return hist.astype(float), avg(x)
    else:
        return hist.astype(float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
        only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
def num_above(values, level):
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
    return alpha, error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
    call: surfd(X, Xmap, bins,
                Xerr=None, Xmaperr=None, scale=1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the schmidt law
to binned x,log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
        #resid = np.abs(y - mod) # where we calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # never runs into this region
        c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
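# Illustrative end-to-end sketch (synthetic data, hypothetical walker
# settings): fit log-SFR data generated from a known Schmidt law; the
# recovered medians should be near beta = 2, kappa = 1.5.
def _demo_emcee_schmidt():
    x = np.linspace(0.2, 2., 12)
    y = np.log(schmidt_law(x, (2., 1.5))) + np.random.normal(0., 0.1, 12)
    yerr = np.full(12, 0.1)
    sampler, med, std = emcee_schmidt(x, y, yerr, pos=(2.2, 1.2),
                                      pose=(0.5, 0.5), nwalkers=32,
                                      nsteps=500, burnin=100, verbose=False)
    return med, std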
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
    samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
    bins2 = shift_bins(bins, 0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
    pose = .1 * pos  # np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
    # an emcee sampler type; it will spit it back out
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
except:
return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
except:
pass
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
plt.plot(xln, percent(50), 'k')  # median model curve
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
plt.fill_between(xln, percent(1), percent(99), color='0.9')  # 98% band
plt.fill_between(xln, percent(2), percent(98), color='0.75')  # 96% band
plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 68% (1 sigma) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
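# --- illustrative usage sketch (added; not part of the original module) ---
# flatchain collapses a (walkers, steps, params) chain to
# (walkers*steps, params); norm_chain standardizes each parameter by its
# flattened median and std. The chain below is hypothetical random data.
def _demo_flatchain():
    chain = np.random.randn(10, 100, 3)  # (walkers, steps, params)
    flat = flatchain(chain)              # shape (1000, 3)
    normed = norm_chain(chain)           # ~zero-median, ~unit-std per parameter
    return flat.shape, normed.shape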
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
|
cdf
|
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, special, stats, signal, integrate, misc  # special is used by sigconf1d
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectangle is created and rotated with center
at zero, and then translated to center position
accepts centers:
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
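# --- illustrative usage sketch (added; not part of the original module) ---
# Feed the polygon from rectangle() to PIL to rasterize a rotated
# rectangular mask. Image size and rectangle parameters are arbitrary
# assumptions for the demo.
def _demo_rectangle_mask():
    poly = rectangle((100., 100.), 60., 30., angle=30.)
    im = Image.new('L', (200, 200), 0)
    ImageDraw.Draw(im).polygon(poly, fill=1)
    return np.asarray(im, dtype=bool)  # True inside the rotated rectangle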
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log space
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
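# --- illustrative usage sketch (added; not part of the original module) ---
# llspace in linear vs. log mode: n=5 linear steps over [0, 10], and
# n=4 log steps (0.5 dex each) over [1, 100].
def _demo_llspace():
    lin = llspace(0., 10., n=5)             # [0, 2, 4, 6, 8, 10]
    log = llspace(1., 100., n=4, log=True)  # [1, ~3.16, 10, ~31.6, 100]
    return lin, log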
def nametoradec(name):
'''
Convert names formatted as
hhmmss.ss+ddmmss to decimal degrees
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
(statistical) probability density function,
normalized so that the integral is 1.
The integral over a range is the
probability that the value lies within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
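# --- illustrative usage sketch (added; not part of the original module) ---
# The pdf above is normalized so that sum(pdf * bin_width) is exactly 1
# for the data falling inside the binned range.
def _demo_pdf():
    vals = np.random.normal(size=1000)
    bins = np.linspace(-4., 4., 41)
    p, centers = pdf(vals, bins)
    total = np.sum(p * np.diff(bins))  # 1.0 by construction
    return p, centers, total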
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
# MASKED: cdf function (lines 324-342)
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
CDF is invariant to binning
Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
M(>Ak), mass weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if (err is None) or (err is False):
return hist.astype(np.float), avg(x)
else:
return hist.astype(np.float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
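# --- illustrative usage sketch (added; not part of the original module) ---
# Plain vs. smooth bootstrap on hypothetical data: the smooth variant
# additionally perturbs each resampled point by its measurement error.
def _demo_bootstrap():
    X = np.random.normal(5., 1., size=200)
    Xerr = np.full_like(X, 0.1)
    plain = bootstrap(X)                      # resample with replacement
    smooth = bootstrap(X, Xerr, smooth=True)  # resample, then perturb by Xerr
    return plain.mean(), smooth.mean()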
def num_above(values, level):
return np.sum((values >= level) & np.isfinite(values), dtype=np.float)
def num_below(values, level):
return np.sum((values < level) & np.isfinite(values), dtype=np.float)
def alpha_ML(data, xmin,xmax):
'''
uses maximum likelihood estimation
to determine the power-law index and error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
call: surfd(X, Xmap, bins,
Xerr=None, Xmaperr=None, scale=1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
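# --- illustrative usage sketch (added; not part of the original module) ---
# Two- vs. three-parameter Schmidt law: the third parameter Ak0 zeroes
# the relation below the threshold. Parameter values are arbitrary.
def _demo_schmidt_law():
    ak = np.linspace(0.1, 2., 5)
    plain = schmidt_law(ak, (2., 1.))           # kappa * Ak**beta everywhere
    truncated = schmidt_law(ak, (2., 1., 0.8))  # zero below Ak0 = 0.8
    return plain, truncated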
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
emcee_schmidt provides a convenient wrapper for fitting the schmidt law
to binned x, log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
#resid = np.abs(y - mod) # where w calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different versions of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
c1 = 0 <= beta <= 6  # never runs into this region
c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
# combine log-prior and log-likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
bins2 = shift_bins(bins,0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
pose = .1*pos#np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
# an emcee sampler type; it will be spit back out unchanged
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
except:
return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
except:
pass
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
plt.plot(xln, percent(50), 'k')  # median model curve
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
plt.fill_between(xln, percent(1), percent(99), color='0.9')  # 98% band
plt.fill_between(xln, percent(2), percent(98), color='0.75')  # 96% band
plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 68% (1 sigma) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
|
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
| 324
| 342
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, special, stats, signal, integrate, misc  # special is used by sigconf1d
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectangle is created and rotated with center
at zero, and then translated to center position
accepts centers:
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log space
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
'''
Convert names formatted as
hhmmss.ss+ddmmss to decimal degrees
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
(statistical) probability density function,
normalized so that the integral is 1.
The integral over a range is the
probability that the value lies within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
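# --- illustrative usage sketch (added; not part of the original module) ---
# cdf starts at 0 and ends at 1 (fractions are normalized by the
# in-range counts).
def _demo_cdf():
    vals = np.random.normal(size=1000)
    bins = np.linspace(-5., 5., 51)
    frac_below, edges = cdf(vals, bins)
    return frac_below[0], frac_below[-1]  # 0.0 and 1.0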
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
CDF is invariant to binning
Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
M(>Ak), mass weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if (err is None) or (err is False):
return hist.astype(np.float), avg(x)
else:
return hist.astype(np.float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
def num_above(values, level):
return np.sum((values >= level) & np.isfinite(values), dtype=np.float)
def num_below(values, level):
return np.sum((values < level) & np.isfinite(values), dtype=np.float)
def alpha_ML(data, xmin,xmax):
'''
uses maximum likelihood estimation
to determine the power-law index and error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
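# --- illustrative usage sketch (added; not part of the original module) ---
# Recover a known power-law index with the Clauset et al. MLE. Synthetic
# data are drawn by inverse-transform sampling from p(x) ~ x**-a, x >= xmin.
def _demo_alpha_ML():
    a_true, xmin = 2.5, 1.0
    u = np.random.rand(5000)
    data = xmin * (1. - u) ** (-1. / (a_true - 1.))
    a_hat, err, loglike, _, _ = alpha_ML(data, xmin, np.inf)
    return a_hat, err  # a_hat should land close to 2.5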
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
call: surfd(X, Xmap, bins,
Xerr=None, Xmaperr=None, scale=1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
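# --- illustrative usage sketch (added; not part of the original module) ---
# surfd as a per-bin ratio of sample counts to map counts, with its
# error estimate. Test data are hypothetical lognormal draws; bins with
# zero map counts will produce inf/nan entries.
def _demo_surfd():
    maps = np.random.lognormal(size=5000)
    samp = np.random.choice(maps, size=500)
    bins = np.logspace(-1., 1., 11)
    y, yerr = surfd(samp, maps, bins, return_err=True)
    return y, yerr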
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
emcee_schmidt provides a convenient wrapper for fitting the schmidt law
to binned x, log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
#resid = np.abs(y - mod) # where w calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different versions of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
c1 = 0 <= beta <= 6  # never runs into this region
c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
# combine log-prior and log-likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
bins2 = shift_bins(bins,0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
pose = .1*pos#np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
# an emcee sampler type; it will be spit back out unchanged
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
except:
return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
except:
pass
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
plt.plot(xln, percent(50), 'k')  # median model curve
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
plt.fill_between(xln, percent(1), percent(99), color='0.9')  # 98% band
plt.fill_between(xln, percent(2), percent(98), color='0.75')  # 96% band
plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 68% (1 sigma) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
|
bootstrap
|
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, special, stats, signal, integrate, misc  # special is used by sigconf1d
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectangle is created and rotated with center
at zero, and then translated to center position
accepts centers:
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
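# --- illustrative usage sketch (added; not part of the original module) ---
# avg returns arithmetic centers for linearly spaced bins and geometric
# centers for logarithmically spaced ones.
def _demo_avg():
    lin_centers = avg(np.array([1., 2., 3., 4.]))  # [1.5, 2.5, 3.5]
    log_centers = avg(np.array([1., 10., 100.]))   # [~3.16, ~31.6]
    return lin_centers, log_centers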
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log space
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
'''
Convert names formatted as
hhmmss.ss+ddmmss to decimal degrees
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
(statistical) probability density function,
normalized so that the integral is 1.
The integral over a range is the
probability that the value lies within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
CDF is invariant to binning
Plot versus bins
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass-weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
    if (err is None) or (err is False):
        return hist.astype(float), avg(x)
    else:
        return hist.astype(float), avg(x), np.sqrt(hist)
# MASKED: bootstrap function (lines 421-446)
def num_above(values, level):
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
call: surfd(X, map, bins,
xerr = None, merr = None, scale = 1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
    a2 = set(np.where(np.isfinite(x + y + (err if err is not None else 0)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the Schmidt law
    to binned x, log(y) data. Generally, it fits a normalization and a slope.
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
#resid = np.abs(y - mod) # where w calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # never runs into this region
        c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
    samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
bins2 = shift_bins(bins,0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
pose = .1*pos#np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
    # an emcee sampler type. It will spit it back out.
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
    except NameError:
return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
    except Exception:
        pass
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
    plt.plot(xln, percent(50), 'k')  # median fit
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
    # draw percentile bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')  # 1st-99th percentile band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # 2nd-98th percentile band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 16th-84th (1 sigma) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
|
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
                    only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
| 421
| 446
|
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
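# Usage sketch (added for illustration; the angle and vector below are
# assumptions, not from the source): rotate the unit x-vector by 90 degrees.
def _demo_rot_matrix():
    R = rot_matrix(np.pi / 2.)
    v = np.dot(R, np.asarray([1., 0.])).A.ravel()
    return v  # approximately [0., 1.]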
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
    Rectangle is created and rotated with center
    at zero, and then translated to the center position
    accepted centers
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
    except AttributeError:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
    Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
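# Illustrative check (assumed toy arrays): avg returns arithmetic midpoints
# for linearly spaced bins and geometric midpoints for log-spaced bins.
def _demo_avg():
    lin = avg(np.array([0., 1., 2.]))     # -> [0.5, 1.5]
    geo = avg(np.array([1., 10., 100.]))  # -> [~3.16, ~31.6]
    return lin, geo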
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
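# Usage sketch (parameter values are assumptions): linear and log-spaced
# grids built with llspace; dx/dex override n as the docstring states.
def _demo_llspace():
    lin = llspace(0., 10., dx=2.)     # 0, 2, 4, 6, 8, 10
    log = llspace(1., 1000., dex=1.)  # 1, 10, 100, 1000
    return lin, log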
def nametoradec(name):
'''
    Convert names formatted as
    hhmmss.ss+ddmmss to decimal degrees
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
    if not isinstance(name, str):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    except AttributeError:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
            ext.append(extmap[int(round(yp[i])), int(round(xp[i]))])
            if errmap is not None:
                err.append(errmap[int(round(yp[i])), int(round(xp[i]))])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
    (statistical) probability density function
    normalized so that the integral is 1.
    The integral over a range is the
    probability that the value is within
    that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
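# Sanity-check sketch (synthetic data, an assumption for illustration): the
# pdf returned here integrates to 1 over the supplied bins.
def _demo_pdf():
    vals = np.random.normal(0., 1., 10000)
    bins = np.linspace(-5., 5., 51)
    p, centers = pdf(vals, bins)
    return np.sum(p * np.diff(bins))  # ~1.0 by construction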
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
    Plot versus bins
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
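# Sanity-check sketch (assumed synthetic data): cdf rises from 0 at the first
# bin edge to 1 at the last, independent of the binning.
def _demo_cdf():
    vals = np.random.normal(0., 1., 5000)
    bins = np.linspace(-4., 4., 33)
    c, edges = cdf(vals, bins)
    return c[0], c[-1]  # 0.0 and ~1.0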
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning
Plot versus bins[:-1]
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
    M(>Ak), mass-weighted complementary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
    if (err is None) or (err is False):
        return hist.astype(float), avg(x)
    else:
        return hist.astype(float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
                    only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
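# Usage sketch (the data and errors are assumptions for illustration): a
# plain resample versus a smooth bootstrap that perturbs each draw by its
# measurement error.
def _demo_bootstrap():
    X = np.random.normal(10., 2., 500)
    Xerr = np.full_like(X, 0.5)
    plain = bootstrap(X)                      # resample with replacement
    smooth = bootstrap(X, Xerr, smooth=True)  # resample, then perturb
    return plain, smooth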
def num_above(values, level):
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin,xmax):
'''
    uses maximum likelihood estimation
    to determine the power-law index and its error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
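# Recovery sketch (synthetic Pareto draws, an assumption): data with density
# proportional to x**(-2.35) above xmin should yield alpha close to 2.35.
def _demo_alpha_ML():
    xmin = 1.0
    data = (np.random.pareto(1.35, 100000) + 1.) * xmin
    a, err, loglike, lo, hi = alpha_ML(data, xmin, np.inf)
    return a, err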
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
call: surfd(X, map, bins,
xerr = None, merr = None, scale = 1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
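# Usage sketch (assumed inputs): the surface density of a thinned sample
# relative to the parent map is flat at the thinning fraction.
def _demo_surfd():
    Xmap = np.random.uniform(0., 2., 20000)
    X = Xmap[np.random.rand(Xmap.size) < 0.1]  # keep ~10% of the map
    bins = np.linspace(0., 2., 11)
    y, yerr = surfd(X, Xmap, bins, return_err=True)
    return y, yerr  # y ~ 0.1 in every bin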
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
    a2 = set(np.where(np.isfinite(x + y + (err if err is not None else 0)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
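# Evaluation sketch (parameter values are assumptions): the two-parameter
# power law versus the three-parameter form with an extinction threshold.
def _demo_schmidt_law():
    Ak = np.linspace(0.1, 2.0, 5)
    two = schmidt_law(Ak, (2.0, 1.0))         # kappa * Ak**beta everywhere
    three = schmidt_law(Ak, (2.0, 1.0, 0.5))  # zeroed below Ak0 = 0.5
    return two, three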
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
    emcee_schmidt provides a convenient wrapper for fitting the Schmidt law
    to binned x, log(y) data. Generally, it fits a normalization and a slope.
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
#resid = np.abs(y - mod) # where w calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
        c1 = 0 <= beta <= 6  # never runs into this region
        c2 = 0 <= kappa  # never runs into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
    samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
bins2 = shift_bins(bins,0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
pose = .1*pos#np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
    # an emcee sampler type. It will spit it back out.
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
    except NameError:
return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
    except Exception:
        pass
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
    plt.plot(xln, percent(50), 'k')  # median fit
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
    # draw percentile bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9')  # 1st-99th percentile band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # 2nd-98th percentile band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 16th-84th (1 sigma) band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
|
get_cocktail_irradiation
|
example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return:
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
from datetime import datetime
from traits.api import Bool
from uncertainties import ufloat
from pychron.core.helpers.datetime_tools import ISO_FORMAT_STR
from pychron.core.helpers.filetools import glob_list_directory, add_extension, \
list_directory
from pychron.dvc import dvc_dump, dvc_load, repository_path, list_frozen_productions
from pychron.dvc.meta_object import IrradiationGeometry, Chronology, Production, cached, Gains, LoadGeometry, \
MetaObjectException
from pychron.git_archive.repo_manager import GitRepoManager
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import INTERFERENCE_KEYS, RATIO_KEYS, DEFAULT_MONITOR_NAME, DATE_FORMAT, NULL_STR
# ============= enthought library imports =======================
def irradiation_geometry(name):
p = os.path.join(paths.meta_root, 'irradiation_holders', add_extension(name))
return IrradiationGeometry(p)
def irradiation_geometry_holes(name):
geom = irradiation_geometry(name)
return geom.holes
def irradiation_chronology(name, allow_null=False):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
return Chronology(p, allow_null=allow_null)
def dump_chronology(path, doses):
if doses is None:
doses = []
with open(path, 'w') as wfile:
for p, s, e in doses:
if not isinstance(s, str):
s = s.strftime(ISO_FORMAT_STR)
            if not isinstance(e, str):
                e = e.strftime(ISO_FORMAT_STR)
if not isinstance(p, str):
p = '{:0.3f}'.format(p)
line = '{},{},{}\n'.format(p, s, e)
wfile.write(line)
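# Usage sketch (path and dose values are illustrative assumptions): each dose
# is a (power, start, end) triple; datetimes are serialized with
# ISO_FORMAT_STR and powers are written with three decimals.
def _example_dump_chronology():
    doses = [(1.0, datetime(2016, 6, 1, 17, 0), datetime(2016, 6, 1, 18, 0))]
    dump_chronology('/tmp/chronology.txt', doses)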
def gain_path(name):
root = os.path.join(paths.meta_root, 'spectrometers')
if not os.path.isdir(root):
os.mkdir(root)
p = os.path.join(root, add_extension('{}.gain'.format(name), '.json'))
return p
def get_frozen_productions(repo):
prods = {}
for name, path in list_frozen_productions(repo):
prods[name] = Production(path)
return prods
def get_frozen_flux(repo, irradiation):
path = repository_path(repo, '{}.json'.format(irradiation))
fd = {}
if path:
fd = dvc_load(path)
for fi in fd.values():
fi['j'] = ufloat(*fi['j'], tag='J')
return fd
class MetaRepo(GitRepoManager):
clear_cache = Bool
def get_monitor_info(self, irrad, level):
age, decay = NULL_STR, NULL_STR
positions = self._get_level_positions(irrad, level)
        # assume all positions have same monitor_age/decay constant. Not strictly true. Potentially some ambiguity, but
# will not be resolved now 8/26/18.
if positions:
position = positions[0]
opt = position.get('options')
if opt:
                age = opt.get('monitor_age', NULL_STR)
decayd = position.get('decay_constants')
if decayd:
decay = decayd.get('lambda_k_total', NULL_STR)
return str(age), str(decay)
def add_unstaged(self, *args, **kw):
super(MetaRepo, self).add_unstaged(self.path, **kw)
def save_gains(self, ms, gains_dict):
p = gain_path(ms)
dvc_dump(gains_dict, p)
if self.add_paths(p):
self.commit('Updated gains')
def update_script(self, rootname, name, path_or_blob):
self._update_text(os.path.join('scripts', rootname.lower()), name, path_or_blob)
def update_experiment_queue(self, rootname, name, path_or_blob):
self._update_text(os.path.join('experiments', rootname.lower()), name, path_or_blob)
def update_level_production(self, irrad, name, prname, note=None):
prname = prname.replace(' ', '_')
pathname = add_extension(prname, '.json')
src = os.path.join(paths.meta_root, irrad, 'productions', pathname)
if os.path.isfile(src):
self.update_productions(irrad, name, prname, note=note)
else:
            self.warning_dialog('Invalid production name {}'.format(prname))
def update_level_monitor(self, irradiation, level, monitor_name, monitor_material, monitor_age, lambda_k):
path = self.get_level_path(irradiation, level)
obj = dvc_load(path)
positions = self._get_level_positions(irradiation, level)
options = {'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
decay_constants = {'lambda_k_total': lambda_k, 'lambda_k_total_error': 0}
for p in positions:
p['options'] = options
p['decay_constants'] = decay_constants
obj['positions'] = positions
dvc_dump(obj, path)
def add_production_to_irradiation(self, irrad, name, params, add=True, commit=False):
self.debug('adding production {} to irradiation={}'.format(name, irrad))
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(name, '.json'))
prod = Production(p, new=not os.path.isfile(p))
prod.update(params)
prod.dump()
if add:
self.add(p, commit=commit)
def add_production(self, irrad, name, obj, commit=False, add=True):
p = self.get_production(irrad, name, force=True)
p.attrs = attrs = INTERFERENCE_KEYS + RATIO_KEYS
kef = lambda x: '{}_err'.format(x)
if obj:
def values():
return ((k, getattr(obj, k), kef(k), getattr(obj, kef(k))) for k in attrs)
else:
def values():
return ((k, 0, kef(k), 0) for k in attrs)
for k, v, ke, e in values():
setattr(p, k, v)
setattr(p, ke, e)
p.dump()
if add:
self.add(p.path, commit=commit)
def update_production(self, prod, irradiation=None):
ip = self.get_production(prod.name)
self.debug('saving production {}'.format(prod.name))
params = prod.get_params()
for k, v in params.items():
self.debug('setting {}={}'.format(k, v))
setattr(ip, k, v)
ip.note = prod.note
self.add(ip.path, commit=False)
self.commit('updated production {}'.format(prod.name))
def update_productions(self, irrad, level, production, note=None, add=True):
p = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(p)
        obj['note'] = str(note) if note else ''
if level in obj:
if obj[level] != production:
self.debug('setting production to irrad={}, level={}, prod={}'.format(irrad, level, production))
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
else:
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def set_identifier(self, irradiation, level, pos, identifier):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
positions = self._get_level_positions(irradiation, level)
d = next((p for p in positions if p['position'] == pos), None)
if d:
d['identifier'] = identifier
jd['positions'] = positions
dvc_dump(jd, p)
self.add(p, commit=False)
def get_level_path(self, irrad, level):
return os.path.join(paths.meta_root, irrad, '{}.json'.format(level))
def add_level(self, irrad, level, add=True):
p = self.get_level_path(irrad, level)
lv = dict(z=0, positions=[])
dvc_dump(lv, p)
if add:
self.add(p, commit=False)
def add_chronology(self, irrad, doses, add=True):
p = os.path.join(paths.meta_root, irrad, 'chronology.txt')
dump_chronology(p, doses)
if add:
self.add(p, commit=False)
def add_irradiation(self, name):
p = os.path.join(paths.meta_root, name)
if not os.path.isdir(p):
os.mkdir(p)
def add_position(self, irradiation, level, pos, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
pd = next((p for p in positions if p['position'] == pos), None)
if pd is None:
positions.append({'position': pos, 'decay_constants': {}})
dvc_dump({'z': z, 'positions': positions}, p)
if add:
self.add(p, commit=False)
def add_irradiation_geometry_file(self, path):
try:
holder = IrradiationGeometry(path)
if not holder.holes:
raise BaseException
except BaseException:
self.warning_dialog('Invalid Irradiation Geometry file. Failed to import')
return
self.smart_pull()
root = os.path.join(paths.meta_root, 'irradiation_holders')
if not os.path.isdir(root):
os.mkdir(root)
name = os.path.basename(path)
dest = os.path.join(root, name)
shutil.copyfile(path, dest)
self.add(dest, commit=False)
self.commit('added irradiation geometry file {}'.format(name))
self.push()
self.information_dialog('Irradiation Geometry "{}" added'.format(name))
# p = os.path.join(root, add_extension(name))
# def add_irradiation_holder(self, name, blob, commit=False, overwrite=False, add=True):
# root = os.path.join(paths.meta_root, 'irradiation_holders')
# if not os.path.isdir(root):
# os.mkdir(root)
# p = os.path.join(root, add_extension(name))
#
# if not os.path.isfile(p) or overwrite:
# with open(p, 'w') as wfile:
# holes = list(iter_geom(blob))
# n = len(holes)
# wfile.write('{},0.0175\n'.format(n))
# for idx, (x, y, r) in holes:
# wfile.write('{:0.4f},{:0.4f},{:0.4f}\n'.format(x, y, r))
# if add:
# self.add(p, commit=commit)
def get_load_holders(self):
p = os.path.join(paths.meta_root, 'load_holders')
return list_directory(p, extension='.txt', remove_extension=True)
def add_load_holder(self, name, path_or_txt, commit=False, add=True):
p = os.path.join(paths.meta_root, 'load_holders', name)
if os.path.isfile(path_or_txt):
shutil.copyfile(path_or_txt, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_txt)
if add:
self.add(p, commit=commit)
def update_level_z(self, irradiation, level, z):
p = self.get_level_path(irradiation, level)
obj = dvc_load(p)
try:
add = obj['z'] != z
obj['z'] = z
except TypeError:
obj = {'z': z, 'positions': obj}
add = True
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def remove_irradiation_position(self, irradiation, level, hole):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if jd:
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd['positions']
z = jd['z']
npositions = [ji for ji in positions if not ji['position'] == hole]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
self.add(p, commit=False)
def new_flux_positions(self, irradiation, level, positions, add=True):
p = self.get_level_path(irradiation, level)
obj = {'positions': positions, 'z': 0}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_fluxes(self, irradiation, level, j, e, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
else:
positions = jd.get('positions')
if positions:
for ip in positions:
ip['j'] = j
ip['j_err'] = e
dvc_dump(jd, p)
if add:
self.add(p, commit=False)
def update_flux(self, irradiation, level, pos, identifier, j, e, mj, me, decay=None,
position_jerr=None,
analyses=None, options=None, add=True):
if options is None:
options = {}
if decay is None:
decay = {}
if analyses is None:
analyses = []
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
npos = {'position': pos, 'j': j, 'j_err': e,
'mean_j': mj, 'mean_j_err': me,
'position_jerr': position_jerr,
'decay_constants': decay,
'identifier': identifier,
'options': options,
'analyses': [{'uuid': ai.uuid,
'record_id': ai.record_id,
'is_omitted': ai.is_omitted()}
for ai in analyses]}
if positions:
added = any((ji['position'] == pos for ji in positions))
npositions = [ji if ji['position'] != pos else npos for ji in positions]
if not added:
npositions.append(npos)
else:
npositions = [npos]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_chronology(self, name, doses):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
dump_chronology(p, doses)
self.add(p, commit=False)
def get_irradiation_holder_names(self):
return glob_list_directory(os.path.join(paths.meta_root, 'irradiation_holders'),
extension='.txt',
remove_extension=True)
# MASKED: get_cocktail_irradiation function (lines 446-467)
def get_default_productions(self):
p = os.path.join(paths.meta_root, 'reactors.json')
if not os.path.isfile(p):
with open(p, 'w') as wfile:
from pychron.file_defaults import REACTORS_DEFAULT
wfile.write(REACTORS_DEFAULT)
return dvc_load(p)
def get_flux_positions(self, irradiation, level):
positions = self._get_level_positions(irradiation, level)
return positions
def get_flux(self, irradiation, level, position):
positions = self.get_flux_positions(irradiation, level)
return self.get_flux_from_positions(position, positions)
def get_flux_from_positions(self, position, positions):
j, je, pe, lambda_k = 0, 0, 0, None
monitor_name, monitor_material, monitor_age = DEFAULT_MONITOR_NAME, 'sanidine', ufloat(28.201, 0)
if positions:
pos = next((p for p in positions if p['position'] == position), None)
if pos:
j, je, pe = pos.get('j', 0), pos.get('j_err', 0), pos.get('position_jerr', 0)
dc = pos.get('decay_constants')
if dc:
# this was a temporary fix and likely can be removed
if isinstance(dc, float):
v, e = dc, 0
else:
v, e = dc.get('lambda_k_total', 0), dc.get('lambda_k_total_error', 0)
lambda_k = ufloat(v, e)
mon = pos.get('monitor')
if mon:
monitor_name = mon.get('name', DEFAULT_MONITOR_NAME)
sa = mon.get('age', 28.201)
se = mon.get('error', 0)
monitor_age = ufloat(sa, se, tag='monitor_age')
monitor_material = mon.get('material', 'sanidine')
fd = {'j': ufloat(j, je, tag='J'),
'position_jerr': pe,
'lambda_k': lambda_k,
'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
return fd
def get_gains(self, name):
g = self.get_gain_obj(name)
return g.gains
def save_sensitivities(self, sens):
ps = []
for k, v in sens.items():
root = os.path.join(paths.meta_root, 'spectrometers')
p = os.path.join(root, add_extension('{}.sens'.format(k), '.json'))
dvc_dump(v, p)
ps.append(p)
if self.add_paths(ps):
self.commit('Updated sensitivity')
def get_sensitivities(self):
specs = {}
root = os.path.join(paths.meta_root, 'spectrometers')
for p in list_directory(root):
if p.endswith('.sens.json'):
name = p.split('.')[0]
p = os.path.join(root, p)
obj = dvc_load(p)
for r in obj:
if r['create_date']:
r['create_date'] = datetime.strptime(r['create_date'], DATE_FORMAT)
specs[name] = obj
return specs
def get_sensitivity(self, name):
sens = self.get_sensitivities()
spec = sens.get(name)
v = 1
if spec:
# get most recent sensitivity
record = spec[-1]
v = record.get('sensitivity', 1)
return v
@cached('clear_cache')
def get_gain_obj(self, name, **kw):
p = gain_path(name)
return Gains(p)
# @cached('clear_cache')
def get_production(self, irrad, level, allow_null=False, **kw):
path = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(path)
pname = obj.get(level, '')
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))
ip = Production(p, allow_null=allow_null)
# print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level)
return pname, ip
# @cached('clear_cache')
def get_chronology(self, name, allow_null=False, **kw):
chron = None
try:
chron = irradiation_chronology(name, allow_null=allow_null)
if self.application:
chron.use_irradiation_endtime = self.application.get_boolean_preference(
'pychron.arar.constants.use_irradiation_endtime', False)
except MetaObjectException:
if name != 'NoIrradiation':
self.warning('Could not locate the irradiation chronology "{}"'.format(name))
return chron
@cached('clear_cache')
def get_irradiation_holder_holes(self, name, **kw):
return irradiation_geometry_holes(name)
@cached('clear_cache')
def get_load_holder_holes(self, name, **kw):
p = os.path.join(paths.meta_root, 'load_holders', add_extension(name))
holder = LoadGeometry(p)
return holder.holes
@property
def sensitivity_path(self):
return os.path.join(paths.meta_root, 'sensitivity.json')
# private
def _get_level_positions(self, irrad, level):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj.get('positions', [])
return positions
def _update_text(self, tag, name, path_or_blob):
if not name:
self.debug('cannot update text with no name. tag={} name={}'.format(tag, name))
return
root = os.path.join(paths.meta_root, tag)
if not os.path.isdir(root):
r_mkdir(root)
p = os.path.join(root, name)
if os.path.isfile(path_or_blob):
shutil.copyfile(path_or_blob, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_blob)
self.add(p, commit=False)
# ============= EOF =============================================
|
def get_cocktail_irradiation(self):
"""
example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return:
"""
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret
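# Consumption sketch (hypothetical, assuming a configured MetaRepo instance):
#     cocktail = meta_repo.get_cocktail_irradiation()
#     if cocktail:
#         j = cocktail['flux']            # ufloat(4e-4, 4e-9) for the example JSON
#         chron = cocktail['chronology']  # Chronology built from the timestamp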
| 446
| 467
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
from datetime import datetime
from traits.api import Bool
from uncertainties import ufloat
from pychron.core.helpers.datetime_tools import ISO_FORMAT_STR
from pychron.core.helpers.filetools import glob_list_directory, add_extension, \
list_directory
from pychron.dvc import dvc_dump, dvc_load, repository_path, list_frozen_productions
from pychron.dvc.meta_object import IrradiationGeometry, Chronology, Production, cached, Gains, LoadGeometry, \
MetaObjectException
from pychron.git_archive.repo_manager import GitRepoManager
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import INTERFERENCE_KEYS, RATIO_KEYS, DEFAULT_MONITOR_NAME, DATE_FORMAT, NULL_STR
# ============= enthought library imports =======================
def irradiation_geometry(name):
p = os.path.join(paths.meta_root, 'irradiation_holders', add_extension(name))
return IrradiationGeometry(p)
def irradiation_geometry_holes(name):
geom = irradiation_geometry(name)
return geom.holes
def irradiation_chronology(name, allow_null=False):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
return Chronology(p, allow_null=allow_null)
def dump_chronology(path, doses):
if doses is None:
doses = []
with open(path, 'w') as wfile:
for p, s, e in doses:
if not isinstance(s, str):
s = s.strftime(ISO_FORMAT_STR)
            if not isinstance(e, str):
                e = e.strftime(ISO_FORMAT_STR)
if not isinstance(p, str):
p = '{:0.3f}'.format(p)
line = '{},{},{}\n'.format(p, s, e)
wfile.write(line)
def gain_path(name):
root = os.path.join(paths.meta_root, 'spectrometers')
if not os.path.isdir(root):
os.mkdir(root)
p = os.path.join(root, add_extension('{}.gain'.format(name), '.json'))
return p
def get_frozen_productions(repo):
prods = {}
for name, path in list_frozen_productions(repo):
prods[name] = Production(path)
return prods
def get_frozen_flux(repo, irradiation):
path = repository_path(repo, '{}.json'.format(irradiation))
fd = {}
if path:
fd = dvc_load(path)
for fi in fd.values():
fi['j'] = ufloat(*fi['j'], tag='J')
return fd
class MetaRepo(GitRepoManager):
clear_cache = Bool
def get_monitor_info(self, irrad, level):
age, decay = NULL_STR, NULL_STR
positions = self._get_level_positions(irrad, level)
        # assume all positions have same monitor_age/decay constant. Not strictly true. Potentially some ambiguity, but
# will not be resolved now 8/26/18.
if positions:
position = positions[0]
opt = position.get('options')
if opt:
                age = opt.get('monitor_age', NULL_STR)
decayd = position.get('decay_constants')
if decayd:
decay = decayd.get('lambda_k_total', NULL_STR)
return str(age), str(decay)
def add_unstaged(self, *args, **kw):
super(MetaRepo, self).add_unstaged(self.path, **kw)
def save_gains(self, ms, gains_dict):
p = gain_path(ms)
dvc_dump(gains_dict, p)
if self.add_paths(p):
self.commit('Updated gains')
def update_script(self, rootname, name, path_or_blob):
self._update_text(os.path.join('scripts', rootname.lower()), name, path_or_blob)
def update_experiment_queue(self, rootname, name, path_or_blob):
self._update_text(os.path.join('experiments', rootname.lower()), name, path_or_blob)
def update_level_production(self, irrad, name, prname, note=None):
prname = prname.replace(' ', '_')
pathname = add_extension(prname, '.json')
src = os.path.join(paths.meta_root, irrad, 'productions', pathname)
if os.path.isfile(src):
self.update_productions(irrad, name, prname, note=note)
else:
            self.warning_dialog('Invalid production name {}'.format(prname))
def update_level_monitor(self, irradiation, level, monitor_name, monitor_material, monitor_age, lambda_k):
path = self.get_level_path(irradiation, level)
obj = dvc_load(path)
positions = self._get_level_positions(irradiation, level)
options = {'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
decay_constants = {'lambda_k_total': lambda_k, 'lambda_k_total_error': 0}
for p in positions:
p['options'] = options
p['decay_constants'] = decay_constants
obj['positions'] = positions
dvc_dump(obj, path)
def add_production_to_irradiation(self, irrad, name, params, add=True, commit=False):
self.debug('adding production {} to irradiation={}'.format(name, irrad))
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(name, '.json'))
prod = Production(p, new=not os.path.isfile(p))
prod.update(params)
prod.dump()
if add:
self.add(p, commit=commit)
def add_production(self, irrad, name, obj, commit=False, add=True):
p = self.get_production(irrad, name, force=True)
p.attrs = attrs = INTERFERENCE_KEYS + RATIO_KEYS
kef = lambda x: '{}_err'.format(x)
if obj:
def values():
return ((k, getattr(obj, k), kef(k), getattr(obj, kef(k))) for k in attrs)
else:
def values():
return ((k, 0, kef(k), 0) for k in attrs)
for k, v, ke, e in values():
setattr(p, k, v)
setattr(p, ke, e)
p.dump()
if add:
self.add(p.path, commit=commit)
def update_production(self, prod, irradiation=None):
ip = self.get_production(prod.name)
self.debug('saving production {}'.format(prod.name))
params = prod.get_params()
for k, v in params.items():
self.debug('setting {}={}'.format(k, v))
setattr(ip, k, v)
ip.note = prod.note
self.add(ip.path, commit=False)
self.commit('updated production {}'.format(prod.name))
def update_productions(self, irrad, level, production, note=None, add=True):
p = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(p)
obj['note'] = str(note) if note else ''
if level in obj:
if obj[level] != production:
self.debug('setting production to irrad={}, level={}, prod={}'.format(irrad, level, production))
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
else:
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def set_identifier(self, irradiation, level, pos, identifier):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
positions = self._get_level_positions(irradiation, level)
d = next((p for p in positions if p['position'] == pos), None)
if d:
d['identifier'] = identifier
jd['positions'] = positions
dvc_dump(jd, p)
self.add(p, commit=False)
def get_level_path(self, irrad, level):
return os.path.join(paths.meta_root, irrad, '{}.json'.format(level))
def add_level(self, irrad, level, add=True):
p = self.get_level_path(irrad, level)
lv = dict(z=0, positions=[])
dvc_dump(lv, p)
if add:
self.add(p, commit=False)
def add_chronology(self, irrad, doses, add=True):
p = os.path.join(paths.meta_root, irrad, 'chronology.txt')
dump_chronology(p, doses)
if add:
self.add(p, commit=False)
def add_irradiation(self, name):
p = os.path.join(paths.meta_root, name)
if not os.path.isdir(p):
os.mkdir(p)
def add_position(self, irradiation, level, pos, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
pd = next((p for p in positions if p['position'] == pos), None)
if pd is None:
positions.append({'position': pos, 'decay_constants': {}})
dvc_dump({'z': z, 'positions': positions}, p)
if add:
self.add(p, commit=False)
def add_irradiation_geometry_file(self, path):
try:
holder = IrradiationGeometry(path)
if not holder.holes:
raise BaseException
except BaseException:
self.warning_dialog('Invalid Irradiation Geometry file. Failed to import')
return
self.smart_pull()
root = os.path.join(paths.meta_root, 'irradiation_holders')
if not os.path.isdir(root):
os.mkdir(root)
name = os.path.basename(path)
dest = os.path.join(root, name)
shutil.copyfile(path, dest)
self.add(dest, commit=False)
self.commit('added irradiation geometry file {}'.format(name))
self.push()
self.information_dialog('Irradiation Geometry "{}" added'.format(name))
# p = os.path.join(root, add_extension(name))
# def add_irradiation_holder(self, name, blob, commit=False, overwrite=False, add=True):
# root = os.path.join(paths.meta_root, 'irradiation_holders')
# if not os.path.isdir(root):
# os.mkdir(root)
# p = os.path.join(root, add_extension(name))
#
# if not os.path.isfile(p) or overwrite:
# with open(p, 'w') as wfile:
# holes = list(iter_geom(blob))
# n = len(holes)
# wfile.write('{},0.0175\n'.format(n))
# for idx, (x, y, r) in holes:
# wfile.write('{:0.4f},{:0.4f},{:0.4f}\n'.format(x, y, r))
# if add:
# self.add(p, commit=commit)
def get_load_holders(self):
p = os.path.join(paths.meta_root, 'load_holders')
return list_directory(p, extension='.txt', remove_extension=True)
def add_load_holder(self, name, path_or_txt, commit=False, add=True):
p = os.path.join(paths.meta_root, 'load_holders', name)
if os.path.isfile(path_or_txt):
shutil.copyfile(path_or_txt, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_txt)
if add:
self.add(p, commit=commit)
def update_level_z(self, irradiation, level, z):
p = self.get_level_path(irradiation, level)
obj = dvc_load(p)
try:
add = obj['z'] != z
obj['z'] = z
except TypeError:
obj = {'z': z, 'positions': obj}
add = True
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def remove_irradiation_position(self, irradiation, level, hole):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if jd:
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd['positions']
z = jd['z']
npositions = [ji for ji in positions if ji['position'] != hole]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
self.add(p, commit=False)
def new_flux_positions(self, irradiation, level, positions, add=True):
p = self.get_level_path(irradiation, level)
obj = {'positions': positions, 'z': 0}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_fluxes(self, irradiation, level, j, e, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
else:
positions = jd.get('positions')
if positions:
for ip in positions:
ip['j'] = j
ip['j_err'] = e
dvc_dump(jd, p)
if add:
self.add(p, commit=False)
def update_flux(self, irradiation, level, pos, identifier, j, e, mj, me, decay=None,
position_jerr=None,
analyses=None, options=None, add=True):
if options is None:
options = {}
if decay is None:
decay = {}
if analyses is None:
analyses = []
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
npos = {'position': pos, 'j': j, 'j_err': e,
'mean_j': mj, 'mean_j_err': me,
'position_jerr': position_jerr,
'decay_constants': decay,
'identifier': identifier,
'options': options,
'analyses': [{'uuid': ai.uuid,
'record_id': ai.record_id,
'is_omitted': ai.is_omitted()}
for ai in analyses]}
if positions:
added = any((ji['position'] == pos for ji in positions))
npositions = [ji if ji['position'] != pos else npos for ji in positions]
if not added:
npositions.append(npos)
else:
npositions = [npos]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_chronology(self, name, doses):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
dump_chronology(p, doses)
self.add(p, commit=False)
def get_irradiation_holder_names(self):
return glob_list_directory(os.path.join(paths.meta_root, 'irradiation_holders'),
extension='.txt',
remove_extension=True)
def get_cocktail_irradiation(self):
"""
example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return:
"""
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret
def get_default_productions(self):
p = os.path.join(paths.meta_root, 'reactors.json')
if not os.path.isfile(p):
with open(p, 'w') as wfile:
from pychron.file_defaults import REACTORS_DEFAULT
wfile.write(REACTORS_DEFAULT)
return dvc_load(p)
def get_flux_positions(self, irradiation, level):
positions = self._get_level_positions(irradiation, level)
return positions
def get_flux(self, irradiation, level, position):
positions = self.get_flux_positions(irradiation, level)
return self.get_flux_from_positions(position, positions)
def get_flux_from_positions(self, position, positions):
j, je, pe, lambda_k = 0, 0, 0, None
monitor_name, monitor_material, monitor_age = DEFAULT_MONITOR_NAME, 'sanidine', ufloat(28.201, 0)
if positions:
pos = next((p for p in positions if p['position'] == position), None)
if pos:
j, je, pe = pos.get('j', 0), pos.get('j_err', 0), pos.get('position_jerr', 0)
dc = pos.get('decay_constants')
if dc:
# this was a temporary fix and likely can be removed
if isinstance(dc, float):
v, e = dc, 0
else:
v, e = dc.get('lambda_k_total', 0), dc.get('lambda_k_total_error', 0)
lambda_k = ufloat(v, e)
mon = pos.get('monitor')
if mon:
monitor_name = mon.get('name', DEFAULT_MONITOR_NAME)
sa = mon.get('age', 28.201)
se = mon.get('error', 0)
monitor_age = ufloat(sa, se, tag='monitor_age')
monitor_material = mon.get('material', 'sanidine')
fd = {'j': ufloat(j, je, tag='J'),
'position_jerr': pe,
'lambda_k': lambda_k,
'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
return fd
def get_gains(self, name):
g = self.get_gain_obj(name)
return g.gains
def save_sensitivities(self, sens):
ps = []
for k, v in sens.items():
root = os.path.join(paths.meta_root, 'spectrometers')
p = os.path.join(root, add_extension('{}.sens'.format(k), '.json'))
dvc_dump(v, p)
ps.append(p)
if self.add_paths(ps):
self.commit('Updated sensitivity')
def get_sensitivities(self):
specs = {}
root = os.path.join(paths.meta_root, 'spectrometers')
for p in list_directory(root):
if p.endswith('.sens.json'):
name = p.split('.')[0]
p = os.path.join(root, p)
obj = dvc_load(p)
for r in obj:
if r['create_date']:
r['create_date'] = datetime.strptime(r['create_date'], DATE_FORMAT)
specs[name] = obj
return specs
def get_sensitivity(self, name):
sens = self.get_sensitivities()
spec = sens.get(name)
v = 1
if spec:
# get most recent sensitivity
record = spec[-1]
v = record.get('sensitivity', 1)
return v
@cached('clear_cache')
def get_gain_obj(self, name, **kw):
p = gain_path(name)
return Gains(p)
# @cached('clear_cache')
def get_production(self, irrad, level, allow_null=False, **kw):
path = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(path)
pname = obj.get(level, '')
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))
ip = Production(p, allow_null=allow_null)
# print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level)
return pname, ip
# @cached('clear_cache')
def get_chronology(self, name, allow_null=False, **kw):
chron = None
try:
chron = irradiation_chronology(name, allow_null=allow_null)
if self.application:
chron.use_irradiation_endtime = self.application.get_boolean_preference(
'pychron.arar.constants.use_irradiation_endtime', False)
except MetaObjectException:
if name != 'NoIrradiation':
self.warning('Could not locate the irradiation chronology "{}"'.format(name))
return chron
@cached('clear_cache')
def get_irradiation_holder_holes(self, name, **kw):
return irradiation_geometry_holes(name)
@cached('clear_cache')
def get_load_holder_holes(self, name, **kw):
p = os.path.join(paths.meta_root, 'load_holders', add_extension(name))
holder = LoadGeometry(p)
return holder.holes
@property
def sensitivity_path(self):
return os.path.join(paths.meta_root, 'sensitivity.json')
# private
def _get_level_positions(self, irrad, level):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj.get('positions', [])
return positions
def _update_text(self, tag, name, path_or_blob):
if not name:
self.debug('cannot update text with no name. tag={} name={}'.format(tag, name))
return
root = os.path.join(paths.meta_root, tag)
if not os.path.isdir(root):
r_mkdir(root)
p = os.path.join(root, name)
if os.path.isfile(path_or_blob):
shutil.copyfile(path_or_blob, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_blob)
self.add(p, commit=False)
# ============= EOF =============================================
|
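For orientation, the level files that add_position, update_flux, and _get_level_positions above read and write come in two shapes: a legacy bare list of positions, and the current dict carrying 'z' and 'positions'. A hedged sketch with invented values:

# Hypothetical level-file contents; keys mirror those used above,
# values are invented purely for illustration.
legacy_level = [
    {'position': 1, 'decay_constants': {}},
]
current_level = {
    'z': 0.5,
    'positions': [
        {'position': 1,
         'identifier': '12345',
         'j': 0.0021, 'j_err': 1e-05,
         'decay_constants': {'lambda_k_total': 5.53e-10,
                             'lambda_k_total_error': 0}},
    ],
}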
api_request
|
Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
|
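A minimal usage sketch, assuming an already-configured InfobloxNetMRI client; the broker name, method, and parameters are illustrative rather than confirmed endpoints:

# Hedged sketch: client construction and broker lookup follow the
# package's documented pattern; 'Device' and 'show' are illustrative.
from infoblox_netmri.client import InfobloxNetMRI

client = InfobloxNetMRI(host='netmri.example.com',
                        username='admin', password='secret')
broker = client.get_broker('Device')  # a concrete Broker subclass
# api_request returns a single wrapped RemoteModel instance
device = broker.api_request(broker._get_method_fullname('show'),
                            {'DeviceID': 1})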
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
""" Base class for broker instances, provides methods for API requests.
And return responces wrapped with specific class
:param client: InfobloxNetMRI client
"""
controller = None
def __init__(self, client):
self.client = client
# MASKED: api_request function (lines 16-38)
# See NETMRI-31545
def api_mixed_request(self, method_name, params):
""" Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
def api_list_request(self, method_name, params):
""" Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if not data:
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print("Sorry, this method will be implemented in the\
future versions of NetMRI")
raise NotImplementedException(self.controller, method_name)
def _get_method_fullname(self, method):
""" Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
"""
return "{}/{}".format(self.controller, method)
def _get_return_object_type(self, data):
""" Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class
"""
if not data or type(data) != dict:
return data
class_name = data.get("_class")
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client)
def _get_remote_class_name(self, name):
""" Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
"""
return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
pckg=to_snake(name),
name=name
)
|
def api_request(self, method_name, params):
""" Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if isinstance(data, dict) and len(data) > 1:
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
| 16
| 38
|
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
""" Base class for broker instances, provides methods for API requests.
And return responces wrapped with specific class
:param client: InfobloxNetMRI client
"""
controller = None
def __init__(self, client):
self.client = client
def api_request(self, method_name, params):
""" Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if isinstance(data, dict) and len(data) > 1:
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
# See NETMRI-31545
def api_mixed_request(self, method_name, params):
""" Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
def api_list_request(self, method_name, params):
""" Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if not data:
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print("Sorry, this method will be implemented in the\
future versions of NetMRI")
raise NotImplementedException(self.controller, method_name)
def _get_method_fullname(self, method):
""" Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
"""
return "{}/{}".format(self.controller, method)
def _get_return_object_type(self, data):
""" Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class
"""
if not data or type(data) != dict:
return data
class_name = data.get("_class")
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client)
def _get_remote_class_name(self, name):
""" Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
"""
return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
pckg=to_snake(name),
name=name
)
|
api_mixed_request
|
Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
|
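Continuing the setup from the api_request sketch, a hedged example of the mixed path, where the response may come back as a plain status/JSON dictionary instead of a wrapped object ('export' is an illustrative method name):

# `broker` as constructed in the api_request sketch above.
result = broker.api_mixed_request(
    broker._get_method_fullname('export'), {'format': 'csv'})
if isinstance(result, dict):
    print('raw JSON / request-status dictionary:', result)
else:
    print('wrapped object:', type(result).__name__)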
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
""" Base class for broker instances, provides methods for API requests.
And return responces wrapped with specific class
:param client: InfobloxNetMRI client
"""
controller = None
def __init__(self, client):
self.client = client
def api_request(self, method_name, params):
""" Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if isinstance(data, dict) and len(data) > 1:
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
# See NETMRI-31545
# MASKED: api_mixed_request function (lines 41-59)
def api_list_request(self, method_name, params):
""" Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if not data:
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print("Sorry, this method will be implemented in the\
future versions of NetMRI")
raise NotImplementedException(self.controller, method_name)
def _get_method_fullname(self, method):
""" Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
"""
return "{}/{}".format(self.controller, method)
def _get_return_object_type(self, data):
""" Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class
"""
if not data or type(data) != dict:
return data
class_name = data.get("_class")
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client)
def _get_remote_class_name(self, name):
""" Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
"""
return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
pckg=to_snake(name),
name=name
)
|
def api_mixed_request(self, method_name, params):
""" Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
| 41
| 59
|
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
""" Base class for broker instances, provides methods for API requests.
And return responces wrapped with specific class
:param client: InfobloxNetMRI client
"""
controller = None
def __init__(self, client):
self.client = client
def api_request(self, method_name, params):
""" Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if isinstance(data, dict) and len(data) > 1:
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
# See NETMRI-31545
def api_mixed_request(self, method_name, params):
""" Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
def api_list_request(self, method_name, params):
""" Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if not data:
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print("Sorry, this method will be implemented in the\
future versions of NetMRI")
raise NotImplementedException(self.controller, method_name)
def _get_method_fullname(self, method):
""" Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
"""
return "{}/{}".format(self.controller, method)
def _get_return_object_type(self, data):
""" Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class
"""
if not data or type(data) != dict:
return data
class_name = data.get("_class")
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client)
def _get_remote_class_name(self, name):
""" Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
"""
return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
pckg=to_snake(name),
name=name
)
|
api_list_request
|
Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
|
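Again reusing the broker from the first sketch, a short hedged example of the list path ('index' is the conventional listing method name, assumed here):

devices = broker.api_list_request(
    broker._get_method_fullname('index'), {'limit': 10})
if devices:  # None is returned when the API sends back no data
    for device in devices:
        print(device)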
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
""" Base class for broker instances, provides methods for API requests.
And return responces wrapped with specific class
:param client: InfobloxNetMRI client
"""
controller = None
def __init__(self, client):
self.client = client
def api_request(self, method_name, params):
""" Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if isinstance(data, dict) and len(data) > 1:
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
# See NETMRI-31545
def api_mixed_request(self, method_name, params):
""" Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
# MASKED: api_list_request function (lines 61-76)
def _get_method_fullname(self, method):
""" Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
"""
return "{}/{}".format(self.controller, method)
def _get_return_object_type(self, data):
""" Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class
"""
if not data or type(data) != dict:
return data
class_name = data.get("_class")
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client)
def _get_remote_class_name(self, name):
""" Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
"""
return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
pckg=to_snake(name),
name=name
)
|
def api_list_request(self, method_name, params):
""" Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if not data:
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print("Sorry, this method will be implemented in the\
future versions of NetMRI")
raise NotImplementedException(self.controller, method_name)
| 61
| 76
|
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
""" Base class for broker instances, provides methods for API requests.
And return responces wrapped with specific class
:param client: InfobloxNetMRI client
"""
controller = None
def __init__(self, client):
self.client = client
def api_request(self, method_name, params):
""" Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if isinstance(data, dict) and len(data) > 1:
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
# See NETMRI-31545
def api_mixed_request(self, method_name, params):
""" Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
def api_list_request(self, method_name, params):
""" Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if not data:
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print("Sorry, this method will be implemented in the\
future versions of NetMRI")
raise NotImplementedException(self.controller, method_name)
def _get_method_fullname(self, method):
""" Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
"""
return "{}/{}".format(self.controller, method)
def _get_return_object_type(self, data):
""" Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class
"""
if not data or type(data) != dict:
return data
class_name = data.get("_class")
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client)
def _get_remote_class_name(self, name):
""" Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
"""
return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
pckg=to_snake(name),
name=name
)
|
__init__
|
Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicate whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing.
|
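_TextSink is normally constructed indirectly through the public WriteToText transform; a minimal runnable sketch (the output path and elements are illustrative):

import apache_beam as beam
from apache_beam.io.textio import WriteToText

# Writes three lines plus a header; shard naming and compression
# fall back to the defaults described in the docstring above.
with beam.Pipeline() as p:
    (p
     | beam.Create(['alpha', 'beta', 'gamma'])
     | WriteToText('/tmp/words', file_name_suffix='.txt',
                   header='# words'))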
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source and a sink for reading from and writing to text files."""
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from typing import Optional
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
__all__ = [
'ReadFromText',
'ReadFromTextWithFilename',
'ReadAllFromText',
'WriteToText'
]
_LOGGER = logging.getLogger(__name__)
class _TextSource(filebasedsource.FileBasedSource):
r"""A source for reading text files.
Parses a text file as newline-delimited elements. Supports newline delimiters
'\n' and '\r\n'.
This implementation only supports reading text encoded using UTF-8 or
ASCII.
"""
DEFAULT_READ_BUFFER_SIZE = 8192
class ReadBuffer(object):
# A buffer that gives the buffered data and next position in the
# buffer that should be read.
def __init__(self, data, position):
self._data = data
self._position = position
@property
def data(self):
return self._data
@data.setter
def data(self, value):
assert isinstance(value, bytes)
self._data = value
@property
def position(self):
return self._position
@position.setter
def position(self, value):
assert isinstance(value, (int, long))
if value > len(self._data):
raise ValueError(
'Cannot set position to %d since it\'s larger than '
'size of data %d.' % (value, len(self._data)))
self._position = value
def reset(self):
self.data = b''
self.position = 0
def __init__(self,
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder, # type: coders.Coder
buffer_size=DEFAULT_READ_BUFFER_SIZE,
validate=True,
skip_header_lines=0,
header_processor_fns=(None, None)):
"""Initialize a _TextSource
Args:
header_processor_fns (tuple): a tuple of a `header_matcher` function
and a `header_processor` function. The `header_matcher` should
return `True` for all lines at the start of the file that are part
of the file header and `False` otherwise. These header lines will
not be yielded when reading records and instead passed into
`header_processor` to be handled. If `skip_header_lines` and a
`header_matcher` are both provided, the value of `skip_header_lines`
lines will be skipped and the header will be processed from
there.
Raises:
ValueError: if skip_header_lines is negative.
Please refer to documentation in class `ReadFromText` for the rest
of the arguments.
"""
super(_TextSource, self).__init__(
file_pattern,
min_bundle_size,
compression_type=compression_type,
validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if skip_header_lines < 0:
raise ValueError(
'Cannot skip negative number of header lines: %d' % skip_header_lines)
elif skip_header_lines > 10:
_LOGGER.warning(
'Skipping %d header lines. Skipping a large number of header '
'lines might significantly slow down processing.', skip_header_lines)
self._skip_header_lines = skip_header_lines
self._header_matcher, self._header_processor = header_processor_fns
def display_data(self):
parent_dd = super(_TextSource, self).display_data()
parent_dd['strip_newline'] = DisplayDataItem(
self._strip_trailing_newlines, label='Strip Trailing New Lines')
parent_dd['buffer_size'] = DisplayDataItem(
self._buffer_size, label='Buffer Size')
parent_dd['coder'] = DisplayDataItem(self._coder.__class__, label='Coder')
return parent_dd
def read_records(self, file_name, range_tracker):
start_offset = range_tracker.start_position()
read_buffer = _TextSource.ReadBuffer(b'', 0)
next_record_start_position = -1
def split_points_unclaimed(stop_position):
return (
0 if stop_position <= next_record_start_position else
iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
with self.open_file(file_name) as file_to_read:
position_after_processing_header_lines = (
self._process_header(file_to_read, read_buffer))
start_offset = max(start_offset, position_after_processing_header_lines)
if start_offset > position_after_processing_header_lines:
# Seeking to one position before the start index and ignoring the
# current line. If start_position is at the beginning of the line, that line
# belongs to the current bundle, hence ignoring that is incorrect.
# Seeking to one byte before prevents that.
file_to_read.seek(start_offset - 1)
read_buffer.reset()
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
if not sep_bounds:
# Could not find a separator after (start_offset - 1). This means that
# none of the records within the file belongs to the current source.
return
_, sep_end = sep_bounds
read_buffer.data = read_buffer.data[sep_end:]
next_record_start_position = start_offset - 1 + sep_end
else:
next_record_start_position = position_after_processing_header_lines
while range_tracker.try_claim(next_record_start_position):
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
# For compressed text files that use an unsplittable OffsetRangeTracker
# with infinity as the end position, above 'try_claim()' invocation
# would pass for an empty record at the end of file that is not
# followed by a new line character. Since such a record is at the last
# position of a file, it should not be a part of the considered range.
# We do this check to ignore such records.
if len(record) == 0 and num_bytes_to_next_record < 0: # pylint: disable=len-as-condition
break
# Record separator must be larger than zero bytes.
assert num_bytes_to_next_record != 0
if num_bytes_to_next_record > 0:
next_record_start_position += num_bytes_to_next_record
yield self._coder.decode(record)
if num_bytes_to_next_record < 0:
break
def _process_header(self, file_to_read, read_buffer):
# Returns a tuple containing the position in file after processing header
# records and a list of decoded header lines that match
# 'header_matcher'.
header_lines = []
position = self._skip_lines(
file_to_read, read_buffer,
self._skip_header_lines) if self._skip_header_lines else 0
if self._header_matcher:
while True:
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
decoded_line = self._coder.decode(record)
if not self._header_matcher(decoded_line):
# We've read past the header section at this point, so go back a line.
file_to_read.seek(position)
read_buffer.reset()
break
header_lines.append(decoded_line)
if num_bytes_to_next_record < 0:
break
position += num_bytes_to_next_record
if self._header_processor:
self._header_processor(header_lines)
return position
def _find_separator_bounds(self, file_to_read, read_buffer):
# Determines the start and end positions within 'read_buffer.data' of the
# next separator starting from position 'read_buffer.position'.
# Currently supports following separators.
# * '\n'
# * '\r\n'
# This method may increase the size of the buffer, but will never shrink it.
current_pos = read_buffer.position
while True:
if current_pos >= len(read_buffer.data):
# Ensuring that there are enough bytes to determine if there is a '\n'
# at current_pos.
if not self._try_to_ensure_num_bytes_in_buffer(
file_to_read, read_buffer, current_pos + 1):
return
# Using find() here is more efficient than a linear scan of the byte
# array.
next_lf = read_buffer.data.find(b'\n', current_pos)
if next_lf >= 0:
if next_lf > 0 and read_buffer.data[next_lf - 1:next_lf] == b'\r':
# Found a '\r\n'. Accepting that as the next separator.
return (next_lf - 1, next_lf + 1)
else:
# Found a '\n'. Accepting that as the next separator.
return (next_lf, next_lf + 1)
current_pos = len(read_buffer.data)
def _try_to_ensure_num_bytes_in_buffer(
self, file_to_read, read_buffer, num_bytes):
# Tries to ensure that there are at least num_bytes bytes in the buffer.
# Returns True if this can be fulfilled, or False if this cannot be
# fulfilled due to reaching EOF.
while len(read_buffer.data) < num_bytes:
read_data = file_to_read.read(self._buffer_size)
if not read_data:
return False
read_buffer.data += read_data
return True
def _skip_lines(self, file_to_read, read_buffer, num_lines):
"""Skip num_lines from file_to_read, return num_lines+1 start position."""
if file_to_read.tell() > 0:
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
_, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
if num_bytes_to_next_record < 0:
# We reached end of file. It is OK to just break here
# because subsequent _read_record will return same result.
break
position += num_bytes_to_next_record
return position
def _read_record(self, file_to_read, read_buffer):
# Returns a tuple containing the current_record and number of bytes to the
# next record starting from 'read_buffer.position'. If EOF is
# reached, returns a tuple containing the current record and -1.
if read_buffer.position > self._buffer_size:
# read_buffer is too large. Truncating and adjusting it.
read_buffer.data = read_buffer.data[read_buffer.position:]
read_buffer.position = 0
record_start_position_in_buffer = read_buffer.position
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
read_buffer.position = sep_bounds[1] if sep_bounds else len(
read_buffer.data)
if not sep_bounds:
# Reached EOF. The bytes up to EOF form the next record. Returning '-1' for
# the starting position of the next record.
return (read_buffer.data[record_start_position_in_buffer:], -1)
if self._strip_trailing_newlines:
# Current record should not contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[0]],
sep_bounds[1] - record_start_position_in_buffer)
else:
# Current record should contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[1]],
sep_bounds[1] - record_start_position_in_buffer)
class _TextSourceWithFilename(_TextSource):
def read_records(self, file_name, range_tracker):
records = super(_TextSourceWithFilename,
self).read_records(file_name, range_tracker)
for record in records:
yield (file_name, record)
class _TextSink(filebasedsink.FileBasedSink):
"""A sink to a GCS or local text file or files."""
# MASKED: __init__ function (lines 345-398)
def open(self, temp_path):
file_handle = super(_TextSink, self).open(temp_path)
if self._header is not None:
file_handle.write(coders.ToStringCoder().encode(self._header))
if self._append_trailing_newlines:
file_handle.write(b'\n')
return file_handle
def display_data(self):
dd_parent = super(_TextSink, self).display_data()
dd_parent['append_newline'] = DisplayDataItem(
self._append_trailing_newlines, label='Append Trailing New Lines')
return dd_parent
def write_encoded_record(self, file_handle, encoded_value):
"""Writes a single encoded record."""
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n')
def _create_text_source(
file_pattern=None,
min_bundle_size=None,
compression_type=None,
strip_trailing_newlines=None,
coder=None,
skip_header_lines=None):
return _TextSource(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=False,
skip_header_lines=skip_header_lines)
class ReadAllFromText(PTransform):
"""A ``PTransform`` for reading a ``PCollection`` of text files.
Reads a ``PCollection`` of text files or file patterns and produces a
``PCollection`` of strings.
Parses a text file as newline-delimited elements, by default assuming
UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'.
This implementation only supports reading text encoded using UTF-8 or ASCII.
This does not support other encodings such as UTF-16 or UTF-32.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
min_bundle_size=0,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
skip_header_lines=0,
**kwargs):
"""Initialize the ``ReadAllFromText`` transform.
Args:
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is ``CompressionTypes.AUTO``, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
coder: Coder used to decode each line.
"""
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(
_create_text_source,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(
True,
compression_type,
desired_bundle_size,
min_bundle_size,
source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
class ReadFromText(PTransform):
r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading text
files.
Parses a text file as newline-delimited elements, by default assuming
``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.
This implementation only supports reading text encoded using ``UTF-8`` or
``ASCII``.
This does not support other encodings such as ``UTF-16`` or ``UTF-32``.
"""
_source_class = _TextSource
def __init__(
self,
file_pattern=None,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
validate=True,
skip_header_lines=0,
**kwargs):
"""Initialize the :class:`ReadFromText` transform.
Args:
file_pattern (str): The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
min_bundle_size (int): Minimum size of bundles that should be generated
when splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
strip_trailing_newlines (bool): Indicates whether this source should
remove the newline char in each line it reads before decoding that line.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
skip_header_lines (int): Number of header lines to skip. Same number is
skipped from each source file. Must be 0 or higher. Large number of
skipped lines might impact performance.
coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.
"""
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder,
validate=validate,
skip_header_lines=skip_header_lines)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
class ReadFromTextWithFilename(ReadFromText):
r"""A :class:`~apache_beam.io.textio.ReadFromText` for reading text
files returning the name of the file and the content of the file.
This class extends the ReadFromText class, just setting a different
_source_class attribute.
"""
_source_class = _TextSourceWithFilename
class WriteToText(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
text files."""
def __init__(
self,
file_path_prefix, # type: str
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None, # type: Optional[str]
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
r"""Initialize a :class:`WriteToText` transform.
Args:
file_path_prefix (str): The file path to write to. The files written will
begin with this prefix, followed by a shard identifier (see
**num_shards**), and end in a common extension, if given by
**file_name_suffix**. In most cases, only this argument is specified and
**num_shards**, **shard_name_template**, and **file_name_suffix** use
default values.
file_name_suffix (str): Suffix for the files written.
append_trailing_newlines (bool): indicate whether this sink should write
an additional newline char after writing each element.
num_shards (int): The number of files (shards) used for output.
If not set, the service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template (str): A template string containing placeholders for
the shard number and shard count. Currently only ``''`` and
``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.
When constructing a filename for a particular shard number, the
upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded
shard number and shard count respectively. This argument can be ``''``
in which case it behaves as if num_shards was set to 1 and only one file
will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.
coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.
compression_type (str): Used to handle compressed output files.
Typical value is :class:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
final file path's extension (as determined by **file_path_prefix**,
**file_name_suffix**, **num_shards** and **shard_name_template**) will
be used to detect the compression.
header (str): String to write at beginning of file as a header.
If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will
be added.
"""
self._sink = _TextSink(
file_path_prefix,
file_name_suffix,
append_trailing_newlines,
num_shards,
shard_name_template,
coder,
compression_type,
header)
def expand(self, pcoll):
return pcoll | Write(self._sink)
|
def __init__(self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
"""Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicate whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing.
"""
super(_TextSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=coder,
mime_type='text/plain',
compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header
| 345
| 398
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source and a sink for reading from and writing to text files."""
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from typing import Optional
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
__all__ = [
'ReadFromText',
'ReadFromTextWithFilename',
'ReadAllFromText',
'WriteToText'
]
_LOGGER = logging.getLogger(__name__)
class _TextSource(filebasedsource.FileBasedSource):
r"""A source for reading text files.
Parses a text file as newline-delimited elements. Supports newline delimiters
'\n' and '\r\n'.
This implementation only supports reading text encoded using UTF-8 or
ASCII.
"""
DEFAULT_READ_BUFFER_SIZE = 8192
class ReadBuffer(object):
# A buffer that gives the buffered data and next position in the
# buffer that should be read.
def __init__(self, data, position):
self._data = data
self._position = position
@property
def data(self):
return self._data
@data.setter
def data(self, value):
assert isinstance(value, bytes)
self._data = value
@property
def position(self):
return self._position
@position.setter
def position(self, value):
assert isinstance(value, (int, long))
if value > len(self._data):
raise ValueError(
'Cannot set position to %d since it\'s larger than '
'size of data %d.' % (value, len(self._data)))
self._position = value
def reset(self):
self.data = b''
self.position = 0
def __init__(self,
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder, # type: coders.Coder
buffer_size=DEFAULT_READ_BUFFER_SIZE,
validate=True,
skip_header_lines=0,
header_processor_fns=(None, None)):
"""Initialize a _TextSource
Args:
header_processor_fns (tuple): a tuple of a `header_matcher` function
and a `header_processor` function. The `header_matcher` should
return `True` for all lines at the start of the file that are part
of the file header and `False` otherwise. These header lines will
not be yielded when reading records and instead passed into
`header_processor` to be handled. If `skip_header_lines` and a
`header_matcher` are both provided, the value of `skip_header_lines`
lines will be skipped and the header will be processed from
there.
Raises:
      ValueError: if skip_header_lines is negative.
Please refer to documentation in class `ReadFromText` for the rest
of the arguments.
"""
super(_TextSource, self).__init__(
file_pattern,
min_bundle_size,
compression_type=compression_type,
validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if skip_header_lines < 0:
raise ValueError(
'Cannot skip negative number of header lines: %d' % skip_header_lines)
elif skip_header_lines > 10:
      _LOGGER.warning(
          'Skipping %d header lines. Skipping a large number of header '
          'lines might significantly slow down processing.', skip_header_lines)
self._skip_header_lines = skip_header_lines
self._header_matcher, self._header_processor = header_processor_fns
def display_data(self):
parent_dd = super(_TextSource, self).display_data()
parent_dd['strip_newline'] = DisplayDataItem(
self._strip_trailing_newlines, label='Strip Trailing New Lines')
parent_dd['buffer_size'] = DisplayDataItem(
self._buffer_size, label='Buffer Size')
parent_dd['coder'] = DisplayDataItem(self._coder.__class__, label='Coder')
return parent_dd
def read_records(self, file_name, range_tracker):
start_offset = range_tracker.start_position()
read_buffer = _TextSource.ReadBuffer(b'', 0)
next_record_start_position = -1
def split_points_unclaimed(stop_position):
return (
0 if stop_position <= next_record_start_position else
iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
with self.open_file(file_name) as file_to_read:
position_after_processing_header_lines = (
self._process_header(file_to_read, read_buffer))
start_offset = max(start_offset, position_after_processing_header_lines)
if start_offset > position_after_processing_header_lines:
# Seeking to one position before the start index and ignoring the
        # current line. If start_position is at the beginning of the line, that
        # line
# belongs to the current bundle, hence ignoring that is incorrect.
# Seeking to one byte before prevents that.
file_to_read.seek(start_offset - 1)
read_buffer.reset()
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
if not sep_bounds:
# Could not find a separator after (start_offset - 1). This means that
# none of the records within the file belongs to the current source.
return
_, sep_end = sep_bounds
read_buffer.data = read_buffer.data[sep_end:]
next_record_start_position = start_offset - 1 + sep_end
else:
next_record_start_position = position_after_processing_header_lines
while range_tracker.try_claim(next_record_start_position):
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
# For compressed text files that use an unsplittable OffsetRangeTracker
# with infinity as the end position, above 'try_claim()' invocation
# would pass for an empty record at the end of file that is not
# followed by a new line character. Since such a record is at the last
# position of a file, it should not be a part of the considered range.
# We do this check to ignore such records.
if len(record) == 0 and num_bytes_to_next_record < 0: # pylint: disable=len-as-condition
break
# Record separator must be larger than zero bytes.
assert num_bytes_to_next_record != 0
if num_bytes_to_next_record > 0:
next_record_start_position += num_bytes_to_next_record
yield self._coder.decode(record)
if num_bytes_to_next_record < 0:
break
def _process_header(self, file_to_read, read_buffer):
# Returns a tuple containing the position in file after processing header
# records and a list of decoded header lines that match
# 'header_matcher'.
header_lines = []
position = self._skip_lines(
file_to_read, read_buffer,
self._skip_header_lines) if self._skip_header_lines else 0
if self._header_matcher:
while True:
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
decoded_line = self._coder.decode(record)
if not self._header_matcher(decoded_line):
# We've read past the header section at this point, so go back a line.
file_to_read.seek(position)
read_buffer.reset()
break
header_lines.append(decoded_line)
if num_bytes_to_next_record < 0:
break
position += num_bytes_to_next_record
if self._header_processor:
self._header_processor(header_lines)
return position
def _find_separator_bounds(self, file_to_read, read_buffer):
# Determines the start and end positions within 'read_buffer.data' of the
# next separator starting from position 'read_buffer.position'.
# Currently supports following separators.
# * '\n'
# * '\r\n'
# This method may increase the size of buffer but it will not decrease the
# size of it.
current_pos = read_buffer.position
while True:
if current_pos >= len(read_buffer.data):
# Ensuring that there are enough bytes to determine if there is a '\n'
# at current_pos.
if not self._try_to_ensure_num_bytes_in_buffer(
file_to_read, read_buffer, current_pos + 1):
return
# Using find() here is more efficient than a linear scan of the byte
# array.
next_lf = read_buffer.data.find(b'\n', current_pos)
if next_lf >= 0:
if next_lf > 0 and read_buffer.data[next_lf - 1:next_lf] == b'\r':
# Found a '\r\n'. Accepting that as the next separator.
return (next_lf - 1, next_lf + 1)
else:
# Found a '\n'. Accepting that as the next separator.
return (next_lf, next_lf + 1)
current_pos = len(read_buffer.data)
def _try_to_ensure_num_bytes_in_buffer(
self, file_to_read, read_buffer, num_bytes):
# Tries to ensure that there are at least num_bytes bytes in the buffer.
    # Returns True if this can be fulfilled, returns False if this cannot be
# fulfilled due to reaching EOF.
while len(read_buffer.data) < num_bytes:
read_data = file_to_read.read(self._buffer_size)
if not read_data:
return False
read_buffer.data += read_data
return True
def _skip_lines(self, file_to_read, read_buffer, num_lines):
"""Skip num_lines from file_to_read, return num_lines+1 start position."""
if file_to_read.tell() > 0:
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
_, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
if num_bytes_to_next_record < 0:
# We reached end of file. It is OK to just break here
# because subsequent _read_record will return same result.
break
position += num_bytes_to_next_record
return position
def _read_record(self, file_to_read, read_buffer):
# Returns a tuple containing the current_record and number of bytes to the
# next record starting from 'read_buffer.position'. If EOF is
# reached, returns a tuple containing the current record and -1.
if read_buffer.position > self._buffer_size:
# read_buffer is too large. Truncating and adjusting it.
read_buffer.data = read_buffer.data[read_buffer.position:]
read_buffer.position = 0
record_start_position_in_buffer = read_buffer.position
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
read_buffer.position = sep_bounds[1] if sep_bounds else len(
read_buffer.data)
if not sep_bounds:
      # Reached EOF. Bytes up to the EOF form the next record. Returning '-1' for
# the starting position of the next record.
return (read_buffer.data[record_start_position_in_buffer:], -1)
if self._strip_trailing_newlines:
# Current record should not contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[0]],
sep_bounds[1] - record_start_position_in_buffer)
else:
# Current record should contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[1]],
sep_bounds[1] - record_start_position_in_buffer)
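# Illustrative sketch (not part of the Beam API) of the separator detection in
# _TextSource._find_separator_bounds above: the next b'\n' is located with
# bytes.find(), and a preceding b'\r' widens the bounds to cover '\r\n'.
#
#   data = b'abc\r\ndef\n'
#   next_lf = data.find(b'\n', 0)  # -> 4
#   # data[3:4] == b'\r', so the separator bounds are (3, 5) and the record is
#   # data[0:3] == b'abc' when strip_trailing_newlines is True.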
class _TextSourceWithFilename(_TextSource):
def read_records(self, file_name, range_tracker):
records = super(_TextSourceWithFilename,
self).read_records(file_name, range_tracker)
for record in records:
yield (file_name, record)
class _TextSink(filebasedsink.FileBasedSink):
"""A sink to a GCS or local text file or files."""
def __init__(self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
"""Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicate whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing.
"""
super(_TextSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=coder,
mime_type='text/plain',
compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header
def open(self, temp_path):
file_handle = super(_TextSink, self).open(temp_path)
if self._header is not None:
file_handle.write(coders.ToStringCoder().encode(self._header))
if self._append_trailing_newlines:
file_handle.write(b'\n')
return file_handle
def display_data(self):
dd_parent = super(_TextSink, self).display_data()
dd_parent['append_newline'] = DisplayDataItem(
self._append_trailing_newlines, label='Append Trailing New Lines')
return dd_parent
def write_encoded_record(self, file_handle, encoded_value):
"""Writes a single encoded record."""
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n')
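# Worked example of the shard naming documented on _TextSink above (values are
# illustrative): file_path_prefix='out', file_name_suffix='.txt', num_shards=2
# and the default shard_name_template '-SSSSS-of-NNNNN' produce the files:
#   out-00000-of-00002.txt
#   out-00001-of-00002.txt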
def _create_text_source(
file_pattern=None,
min_bundle_size=None,
compression_type=None,
strip_trailing_newlines=None,
coder=None,
skip_header_lines=None):
return _TextSource(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=False,
skip_header_lines=skip_header_lines)
class ReadAllFromText(PTransform):
"""A ``PTransform`` for reading a ``PCollection`` of text files.
  Reads a ``PCollection`` of text files or file patterns and produces a
``PCollection`` of strings.
Parses a text file as newline-delimited elements, by default assuming
UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'.
This implementation only supports reading text encoded using UTF-8 or ASCII.
This does not support other encodings such as UTF-16 or UTF-32.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
min_bundle_size=0,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
skip_header_lines=0,
**kwargs):
"""Initialize the ``ReadAllFromText`` transform.
Args:
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is ``CompressionTypes.AUTO``, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
coder: Coder used to decode each line.
"""
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(
_create_text_source,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(
True,
compression_type,
desired_bundle_size,
min_bundle_size,
source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
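# Minimal usage sketch for ReadAllFromText; the file paths are placeholders:
#
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#     lines = (p
#              | beam.Create(['gs://bucket/a.txt', 'gs://bucket/b*.txt'])
#              | ReadAllFromText())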
class ReadFromText(PTransform):
r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading text
files.
Parses a text file as newline-delimited elements, by default assuming
``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.
This implementation only supports reading text encoded using ``UTF-8`` or
``ASCII``.
This does not support other encodings such as ``UTF-16`` or ``UTF-32``.
"""
_source_class = _TextSource
def __init__(
self,
file_pattern=None,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
validate=True,
skip_header_lines=0,
**kwargs):
"""Initialize the :class:`ReadFromText` transform.
Args:
file_pattern (str): The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
min_bundle_size (int): Minimum size of bundles that should be generated
when splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
strip_trailing_newlines (bool): Indicates whether this source should
remove the newline char in each line it reads before decoding that line.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
skip_header_lines (int): Number of header lines to skip. Same number is
skipped from each source file. Must be 0 or higher. Large number of
skipped lines might impact performance.
coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.
"""
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder,
validate=validate,
skip_header_lines=skip_header_lines)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
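# Minimal usage sketch for ReadFromText; the file pattern is a placeholder:
#
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#     lines = p | 'Read' >> ReadFromText('gs://bucket/input*.txt',
#                                        skip_header_lines=1)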
class ReadFromTextWithFilename(ReadFromText):
r"""A :class:`~apache_beam.io.textio.ReadFromText` for reading text
files returning the name of the file and the content of the file.
  This class extends ReadFromText, just setting a different
  _source_class attribute.
"""
_source_class = _TextSourceWithFilename
class WriteToText(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
text files."""
def __init__(
self,
file_path_prefix, # type: str
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None, # type: Optional[str]
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
r"""Initialize a :class:`WriteToText` transform.
Args:
file_path_prefix (str): The file path to write to. The files written will
begin with this prefix, followed by a shard identifier (see
**num_shards**), and end in a common extension, if given by
**file_name_suffix**. In most cases, only this argument is specified and
**num_shards**, **shard_name_template**, and **file_name_suffix** use
default values.
file_name_suffix (str): Suffix for the files written.
append_trailing_newlines (bool): indicate whether this sink should write
an additional newline char after writing each element.
num_shards (int): The number of files (shards) used for output.
If not set, the service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template (str): A template string containing placeholders for
the shard number and shard count. Currently only ``''`` and
``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.
When constructing a filename for a particular shard number, the
upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded
shard number and shard count respectively. This argument can be ``''``
in which case it behaves as if num_shards was set to 1 and only one file
will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.
coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.
compression_type (str): Used to handle compressed output files.
Typical value is :class:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
final file path's extension (as determined by **file_path_prefix**,
**file_name_suffix**, **num_shards** and **shard_name_template**) will
be used to detect the compression.
header (str): String to write at beginning of file as a header.
If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will
be added.
"""
self._sink = _TextSink(
file_path_prefix,
file_name_suffix,
append_trailing_newlines,
num_shards,
shard_name_template,
coder,
compression_type,
header)
def expand(self, pcoll):
return pcoll | Write(self._sink)
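# Minimal usage sketch for WriteToText; the output prefix is a placeholder:
#
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#     (p
#      | beam.Create(['line1', 'line2'])
#      | 'Write' >> WriteToText('/tmp/out', file_name_suffix='.txt'))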
|
process_compilers
|
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
        # Figure out if the extracted object list is compatible with a Unity
        # build. When we're doing a unity build, we go through the sources,
        # and create a single source file from each subset of the sources that
        # can be compiled with a specific compiler. Then we create one object
        # from each unified source file. So for each compiler we can either
        # extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
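# Illustrative example of the rule enforced by check_unity_compatible above:
# in a unity build, C sources a.c and b.c are combined into a single unified
# object, so target.extract_objects(['a.c']) alone must fail, while extracting
# both sources (or calling extract_all_objects()) is allowed.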
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
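# Hypothetical usage sketch for EnvironmentVariables; entries are normally
# registered by interpreter-level wrappers that are not part of this file:
#
#   ev = EnvironmentVariables()
#   ev.envvars.append((ev.append, 'PATH', ['/opt/bin'], {}))
#   ev.get_env({'PATH': '/usr/bin'})
#   # -> {'PATH': '/usr/bin' + os.pathsep + '/opt/bin'}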
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
            # preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
def is_linkable_target(self):
return False
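# Worked example of the target ID scheme implemented above; the '@sha' type
# suffix is illustrative:
#   Target.construct_id_from_path('', 'mylib', '@sha') -> 'mylib@sha'
#   Target.construct_id_from_path('sub/dir', 'mylib', '@sha')
#     -> '<first 7 hex digits of sha256("sub/dir")>@@mylib@sha'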
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
        if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
# MASKED: process_compilers function (lines 584-643)
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # we are merging two dictionaries, while keeping the earlier one dominant
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
                                       (self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet, so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
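# Illustrative sketch (not Meson API; the helper name and the priority tuple
# below are hypothetical stand-ins): the priority-order linker selection
# described in get_clink_dynamic_linker_and_stdlibs above reduces to "the
# first language in the priority tuple that the target or its deps use".
def _example_pick_linker_language(priority_order, used_languages):
    for lang in priority_order:
        if lang in used_languages:
            return lang
    return None

# E.g. with a priority tuple ('d', 'cpp', 'c'), a mixed C/C++ target
# links with the C++ toolchain:
assert _example_pick_linker_language(('d', 'cpp', 'c'), {'c', 'cpp'}) == 'cpp'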
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down"; note that relative_to raises ValueError if trial is outside parent.
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
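# Illustrative sketch (hypothetical helper, not part of Meson): the
# @BASENAME@/@PLAINNAME@ substitution performed by Generator.get_base_outnames
# above. For an input 'src/foo.c', @PLAINNAME@ expands to 'foo.c' and
# @BASENAME@ to 'foo'.
def _example_substitute_outnames(templates, inname):
    import os
    plainname = os.path.basename(inname)
    basename = os.path.splitext(plainname)[0]
    return [t.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
            for t in templates]

assert _example_substitute_outnames(['@BASENAME@.h'], 'src/foo.c') == ['foo.h']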
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
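# Illustrative sketch (hypothetical helper): the path-preserving rename in
# GeneratedList.add_preserved_path_segment above keeps each input's
# directory structure relative to preserve_path_from.
def _example_preserved_outnames(in_abs, preserve_root, outfiles):
    import os
    segment = os.path.dirname(os.path.relpath(in_abs, preserve_root))
    return [os.path.join(segment, of) for of in outfiles]

# E.g. on POSIX: ('/src/sub/foo.c', '/src', ['foo.h']) -> ['sub/foo.h']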
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix, except on
# Windows and with C#/Mono executables, where the suffix is 'exe'.
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
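# Illustrative sketch (mirrors the export_dynamic naming above; the helper
# and values are hypothetical): the two import-library spellings an
# executable can produce on Windows, depending on the toolchain family.
def _example_exe_import_names(implib_basename, using_msvc):
    vs_name = '{0}.lib'.format(implib_basename)    # Visual Studio style
    gcc_name = 'lib{0}.a'.format(implib_basename)  # MinGW/GCC style
    return vs_name if using_msvc else gcc_name

assert _example_exe_import_names('foo.exe', True) == 'foo.exe.lib'
assert _example_exe_import_names('foo.exe', False) == 'libfoo.exe.a'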
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
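# Illustrative sketch (hypothetical helper; assumes an ltversion is set and
# differs from the soversion): the alias chain built by
# SharedLibrary.get_aliases above for a versioned .so, i.e.
# libfoo.so -> libfoo.so.0 -> libfoo.so.0.100.0 (the actual library).
def _example_so_aliases(name, soversion, ltversion):
    real = 'lib{0}.so.{1}'.format(name, ltversion)
    sover = 'lib{0}.so.{1}'.format(name, soversion)
    return {sover: real, 'lib{0}.so'.format(name): sover}

assert _example_so_aliases('foo', '0', '0.100.0') == {
    'libfoo.so.0': 'libfoo.so.0.100.0',
    'libfoo.so': 'libfoo.so.0',
}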
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
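# Illustrative sketch (hypothetical helper; the regex and range validation
# are omitted): the normalization performed by
# SharedLibrary._validate_darwin_versions above, which accepts an int, a
# string, or a list and always yields a two-element list of strings.
def _example_normalize_darwin_versions(value):
    if isinstance(value, int):
        value = str(value)
    if isinstance(value, str):
        value = [value, value]
    if len(value) == 1:
        value = value * 2
    return [str(v) for v in value]

assert _example_normalize_darwin_versions(1) == ['1', '1']
assert _example_normalize_darwin_versions(['1.2', '1.2.3']) == ['1.2', '1.2.3']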
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
For example, if you have a Python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library), got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
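# Illustrative sketch (a generic graph walk over a plain mapping, not the
# exact Meson types): the recursion in
# CustomTarget.get_transitive_build_target_deps above, written as an
# explicit stack.
def _example_transitive_deps(target, deps_of):
    seen = set()
    stack = [target]
    while stack:
        t = stack.pop()
        for d in deps_of.get(t, ()):
            if d not in seen:
                seen.add(d)
                stack.append(d)
    return seen

assert _example_transitive_deps('a', {'a': ['b'], 'b': ['c']}) == {'b', 'c'}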
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
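# Illustrative example (hypothetical jar paths): the classpath argument
# built by Jar.get_classpath_args above joins link-target jars with the
# platform path separator; on POSIX this yields ['-cp', 'a.jar:b.jar'].
def _example_classpath(paths):
    import os
    return ['-cp', os.pathsep.join(paths)] if paths else []

assert _example_classpath([]) == []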
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
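# Illustrative usage sketch (hypothetical key): each ConfigurationData
# entry is stored as a (value, description) pair, as noted in get() above.
def _example_configuration_data():
    cd = ConfigurationData()
    cd.values['HAVE_FOO'] = (1, 'Whether foo is available')
    return 'HAVE_FOO' in cd and cd.get('HAVE_FOO') == (1, 'Whether foo is available')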
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
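# Illustrative sketch (an in-memory stand-in for the build.dat file above):
# load() and save() amount to a plain pickle round-trip plus error handling.
def _example_pickle_roundtrip(obj):
    import io, pickle
    buf = io.BytesIO()
    pickle.dump(obj, buf)
    buf.seek(0)
    return pickle.load(buf)

assert _example_pickle_roundtrip({'a': 1}) == {'a': 1}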
|
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
| 584
| 643
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
# extract all of its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files cannot be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
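# Illustrative sketch (hypothetical values): the append/prepend semantics
# of EnvironmentVariables above, which join the new values around the
# existing entry with a separator (os.pathsep by default).
def _example_env_append_prepend():
    import os
    sep = os.pathsep
    env = {'PATH': '/usr/bin'}
    env['PATH'] = '/opt/bin' + sep + env['PATH']    # prepend
    env['PATH'] = env['PATH'] + sep + '/extra/bin'  # append
    return env['PATH']  # '/opt/bin:/usr/bin:/extra/bin' on POSIX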
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported; it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of the form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
def is_linkable_target(self):
return False
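# Illustrative sketch (mirrors Target.construct_id_from_path above; the
# helper name is hypothetical): path separators in the name become '@', and
# a non-empty subdir is folded into a short sha256 prefix so the ID stays a
# valid, shell-safe file name.
def _example_target_id(subdir, name, type_suffix):
    import hashlib
    name_part = name.replace('/', '@').replace('\\', '@') + type_suffix
    if not subdir:
        return name_part
    subdir_hash = hashlib.sha256(subdir.encode('utf-8')).hexdigest()[:7]
    return subdir_hash + '@@' + name_part

# E.g. _example_target_id('src', 'foo', '@exe') -> '<7 hex chars>@@foo@exe'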
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files output by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
        '''
        Populate self.compilers, an ordered mapping from language to the
        compiler this target will use for compiling all its sources.
        We also add compilers that were used by extracted objects to simplify
        dynamic linker determination.
        '''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
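    # A worked sketch of the outcome above (compiler objects hypothetical):
    # a target with sources ['main.cpp', 'util.c'] ends up with
    #   self.compilers == OrderedDict([('cpp', <cpp>), ('c', <c>)])
    # because clink_langs (defined in the compilers module) ranks 'cpp'
    # ahead of 'c'; that ordering later drives dynamic-linker selection.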
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
        # This is ugly: arguments are wrapped in holders, which cannot be
        # pickled, so unpack the ones we know about.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
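    # Merge semantics sketch (entries hypothetical): with
    #   result  == {'libfoo.so.1': '$prefix/lib/libfoo.so.1'}
    #   mapping == {'libfoo.so.1': '/build/libfoo.so.1',
    #               'libbar.so': '/build/libbar.so'}
    # result_tmp = mapping.copy(); result_tmp.update(result) keeps the
    # entry from `result`, i.e. mappings from earlier link targets win.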
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
        kwargs.get('modules', [])  # 'modules' is accepted but its value is not used here.
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
        compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist,
                         'objc': objclist, 'objcpp': objcpplist,
                         'fortran': fortranlist, 'rust': rustlist}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
            if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
                                       (self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
        all_flags = self.extra_args.get('c', []) + self.extra_args.get('cpp', [])
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
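    # Precedence sketch for _extract_pic_pie (kwarg values hypothetical):
    #   c_args contains '-fPIC'  -> warns, returns True regardless of 'pic'
    #   pic: true in kwargs      -> returns True
    #   neither                  -> returns False (the default)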
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
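    # Shapes accepted by add_pch above (file names hypothetical):
    #   ['pch/foo.h']              -> header-only PCH
    #   ['pch/foo.h', 'pch/foo.c'] -> header plus source, in either order;
    #                                 normalized so the header comes first
    # Both files must be in the same directory and exist in the source tree.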
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
                for dl in itertools.chain(self.compilers, dep_langs):
                    # A language may appear in both self.compilers and
                    # dep_langs; add its stdlib link flags only once.
                    if dl != linker.language and dl not in added_languages:
                        stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
                        added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
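    # Selection sketch: for a target whose own or dependency languages are
    # {'c', 'cpp'}, clink_langs ranks 'cpp' first, so the C++ compiler is
    # returned as the linker and the C compiler contributes only its
    # stdlib-only link flags (typically empty for C).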
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
        return linker is not None and linker.get_id() in {'msvc', 'clang-cl', 'llvm', 'dmd'}
def check_module_linking(self):
'''
        Warn if shared modules are linked with this target (via link_with). See issue #2865.
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
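    # For example, with self.outputs == ['@BASENAME@.c', '@BASENAME@.h']
    # and inname 'gen/foo.idl', this returns ['foo.c', 'foo.h'].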
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
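    # E.g. a depfile of '@BASENAME@.d' with inname 'gen/foo.idl' yields 'foo.d'.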
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
            elif (('c' in self.compilers and self.compilers['c'].get_id().startswith('arm')) or
                  ('cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm'))):
                self.suffix = 'axf'
            elif (('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx')) or
                  ('cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx'))):
                self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
            if self.export_dynamic and kwargs.get('implib') is False:
                raise InvalidArguments('"implib" keyword argument must not be false if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
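    # Illustrative results for a library named 'foo' with default
    # prefix/suffix (import-library names per the branches above):
    #   Windows + MSVC:  foo.dll      (import library foo.lib)
    #   Windows + GCC:   libfoo.dll   (import library libfoo.dll.a)
    #   Cygwin:          cygfoo.dll   (import library libfoo.dll.a)
    #   macOS:           libfoo.dylib (libfoo.X.dylib with soversion X)
    #   Android:         libfoo.so    (never versioned)
    #   Linux et al.:    libfoo.so    (libfoo.so.X.Y.Z with ltversion X.Y.Z)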
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
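    # Normalization examples for the validator above:
    #   7            -> ['7', '7']
    #   '1.2'        -> ['1.2', '1.2']
    #   [1, '2.3']   -> ['1', '2.3']
    # Anything longer than two elements, non-numeric, or out of range
    # raises InvalidArguments.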
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
        whether through `command:`, `depends:`, or `sources:`. The recursion is
        only performed on custom targets.
        This is useful for setting PATH on Windows for finding required DLLs,
        e.g. if you have a python script that loads a C module that links to
        other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
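    # Flattening sketch (names hypothetical): a command such as
    #   [prog_target, '--opt', some_file, ['nested', 'args']]
    # comes back as one flat list; along the way File entries are appended
    # to self.depend_files, build/custom targets to self.dependencies, and
    # ExternalPrograms are replaced by their full command lines.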
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
            if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
        if cp_string:
            return ['-cp', cp_string]
return []
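    # Example (hedged): two linked jars built in subdir 'java' yield
    # ['-cp', 'java/a.jar' + os.pathsep + 'java/b.jar']; with no link
    # targets the classpath arguments are omitted entirely.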
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
    def __init__(self, sources, install_dir, install_mode=None, rename=None):
        self.sources = listify(sources)
        self.install_dir = install_dir
        self.install_mode = install_mode
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
process_link_depends
|
Process the link_depends keyword argument.
This is designed to handle strings, Files, and the outputs of Custom
Targets. Notably, it does not handle objects returned by generator(),
since adding those as link dependencies would inherently cause them to
be generated twice: the output needs to be passed to both ld_args and
link_depends. (A hedged reconstruction is sketched at the MASKED marker
in the file content below.)
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
        self.searched_programs = set() # The set of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
        # Figure out if the extracted object list is compatible with a unity
        # build. When we're doing a unity build, we go through the sources,
        # and create a single source file from each subset of the sources that
        # can be compiled with a specific compiler. Then we create one object
        # from each unified source file. So for each compiler we can either
        # extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
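    # Usage sketch (hedged): each entry in self.envvars is a (method, name,
    # values, kwargs) tuple. For instance (self.append, 'PATH', ['/opt/bin'],
    # {}) turns an existing PATH of '/usr/bin' into
    # '/usr/bin' + os.pathsep + '/opt/bin'; prepend() reverses that order.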
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
            mlog.warning('''Target "%s" has a path separator in its name.
This is not supported; it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
            # preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
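    # Worked example (hedged): construct_id_from_path('sub', 'foo/bar', '@exe')
    # maps the name to 'foo@bar', appends the type suffix to get 'foo@bar@exe',
    # and prefixes the 7-hex-digit hash of 'sub', yielding '<hash7>@@foo@bar@exe';
    # with an empty subdir the result is just 'foo@bar@exe'.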
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
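    # Example (hedged): override_options: ['unity=on', 'cpp_std=c++14'] parses
    # to {'unity': 'on', 'cpp_std': 'c++14'}; an entry without '=' raises
    # InvalidArguments.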
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
        if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
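        # Iterate over a shallow copy (sources[:]) so items can be removed
        # from the original list safely while looping.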
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
# MASKED: process_link_depends function (lines 663-688)
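    # A hedged reconstruction of the masked function, based on the
    # process_link_depends docstring above and on the call site
    # self.process_link_depends(kwargs.get('link_depends', []), environment);
    # the original implementation may differ in detail.
    def process_link_depends(self, sources, environment):
        sources = listify(sources)
        for s in sources:
            if hasattr(s, 'held_object'):
                s = s.held_object
            if isinstance(s, File):
                # Files are recorded as-is.
                self.link_depends.append(s)
            elif isinstance(s, str):
                # Strings are resolved relative to this target's subdir.
                self.link_depends.append(
                    File.from_source_file(environment.source_dir, self.subdir, s))
            elif hasattr(s, 'get_outputs'):
                # Custom targets contribute their built outputs.
                self.link_depends.extend(
                    File.from_built_file(s.subdir, p) for p in s.get_outputs())
            else:
                raise InvalidArguments(
                    'Link_depends arguments must be strings, Files, '
                    'or a Custom Target, or lists thereof.')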
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # We are merging two dictionaries, keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg {} not one of: {}'.format(
                    self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
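        # For example (hedged), a target mixing C and C++ sources links with
        # the C++ compiler, since 'cpp' precedes 'c' in the clink_langs
        # priority order defined in compilers.py.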
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
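        # Example (hedged): output=['@BASENAME@.c', '@BASENAME@.h'] turns an
        # input 'foo.idl' into outputs 'foo.c' and 'foo.h'; the substitution
        # itself happens in get_base_outnames() below.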
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
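        # Purely lexical containment check. Note that relative_to() raises
        # ValueError when trial does not start with parent, and a leading '..'
        # in the remainder means trial escapes back out of parent.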
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
        The name of the import library that will be outputted by the compiler.
        Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
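# Sketch of how the templates above expand; the values are hypothetical,
# not taken from a real target:
#   >>> class _T: prefix, name, suffix, soversion = 'lib', 'foo', 'so', '3'
#   >>> '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'.format(_T)
#   'libfoo.so.3'
# i.e. str.format() reads the attributes straight off the target object.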
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
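# A rough sketch of the normalization this performs (values hypothetical):
#   >>> SharedLibrary._validate_darwin_versions(7)
#   ['7', '7']
#   >>> SharedLibrary._validate_darwin_versions(['1.2.3'])
#   ['1.2.3', '1.2.3']
# A single string/integer is duplicated into [compatibility, current];
# malformed or out-of-range values raise InvalidArguments.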
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
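# Sketch of the resulting mapping for a hypothetical Linux target with
# ltversion='0.100.0' and soversion='0' (filename 'libfoo.so.0.100.0'):
#   {'libfoo.so.0': 'libfoo.so.0.100.0',
#    'libfoo.so': 'libfoo.so.0'}
# Each alias (key) points at the file (value) it should resolve to.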
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
For example, if you have a Python script that loads a C module that links
to other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
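# Sketch (hypothetical command list): flatten_command(['gen.py', some_file,
# some_target]) returns the strings/objects to put on the command line and,
# as a side effect, records some_file in self.depend_files and some_target
# in self.dependencies; a not-found ExternalProgram raises InvalidArguments.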
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library); got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
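# Sketch (hypothetical names): with depfile '@BASENAME@.d' and an input
# list ['src/foo.c'], this returns 'foo.d'; with '@PLAINNAME@.d' it
# returns 'foo.c.d'.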
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
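# Sketch (hypothetical jars): with link targets producing 'a/a.jar' and
# 'b/b.jar', this yields ['-cp', 'a/a.jar:b/b.jar'] on POSIX, where ':' is
# os.pathsep (';' on Windows).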
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
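# Sketch (hypothetical sources): a mixed list such as
#   ['main.c', File(..., fname='util.c'), some_custom_target]
# flattens to ['main.c', 'util.c'] plus every output name of
# some_custom_target; unknown source types raise AssertionError.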
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
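# Minimal sketch of the round-trip these helpers provide (paths hypothetical):
#   b = load('/path/to/builddir')        # unpickles meson-private/build.dat
#   save(b, '/tmp/build.dat.backup')     # pickles the Build object back out
# load() also validates that the unpickled object really is a Build instance.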
|
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice: the output needs to be passed to both the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
| 663
| 688
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a Unified build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
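# Sketch of the append/prepend semantics (hypothetical values, POSIX pathsep):
#   env = {'PATH': '/usr/bin'}
#   ev = EnvironmentVariables()
#   ev.append(env, 'PATH', ['/opt/bin'], {})   # -> '/usr/bin:/opt/bin'
#   ev.prepend(env, 'PATH', ['/opt/bin'], {})  # -> '/opt/bin:/usr/bin'
# The separator defaults to os.pathsep and can be overridden via
# kwargs['separator'].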
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
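# Sketch (hypothetical target): construct_id_from_path('', 'foo', '@exe')
# yields 'foo@exe'; with subdir 'src/lib' it yields
# '<7-hex-char-hash>@@foo@exe', keeping the readable part at the end.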
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
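# Sketch (hypothetical kwargs): override_options=['c_std=c99', 'werror=true']
# parses to {'c_std': 'c99', 'werror': 'true'}; an entry without '=' raises
# InvalidArguments.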
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice: the output needs to be passed to both the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack the known ones.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
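# Sketch of the "earlier one dominant" merge used above (values hypothetical):
#   result = {'old.dylib': '/a'}
#   mapping = {'old.dylib': '/b', 'other.dylib': '/c'}
#   merged = mapping.copy(); merged.update(result)
#   # -> {'old.dylib': '/a', 'other.dylib': '/c'}
# Copying the new mapping first and updating with the accumulated result
# keeps entries found earlier in the link chain.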
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
                                       (self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
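    # Illustrative behaviour of _extract_pic_pie (editorial sketch, not in
    # the original source): a manual '-fPIC' in c_args triggers the warning
    # above and forces the result to True, whereas kwargs == {'pic': True}
    # returns True without any warning.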
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
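        # Illustrative sketch (editorial): an executable built only from C
        # sources that links a static library whose compilers include 'cpp'
        # gets ['cpp'] back from this method, so the C++ compiler ends up
        # driving the final link.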
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
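        # Illustrative sketch (editorial): a target mixing D and C++ sources
        # picks the D compiler as the linker, because 'd' outranks 'cpp' in
        # clink_langs, and stdlib_args then collects the C++ runtime flags
        # (e.g. -lstdc++ on GCC-compatible toolchains) via
        # language_stdlib_only_link_flags().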
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
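        # Illustrative consequence (editorial): when this returns True,
        # SharedLibrary.determine_filenames() later names a library 'foo'
        # as foo.dll with import library foo.lib, instead of the GCC-style
        # libfoo.dll with libfoo.dll.a.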
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
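    # Illustrative example (hypothetical generator, editorial): with
    # self.outputs == ['@BASENAME@.c', '@PLAINNAME@.h'] and an input named
    # 'sub/foo.idl', get_base_outnames() returns ['foo.c', 'foo.idl.h'].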
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
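    # Illustrative example (editorial): is_parent_path('/src',
    # '/src/gen/a.c') returns True. Note that PurePath.relative_to() raises
    # ValueError when trial is not under parent at all, so callers are
    # expected to pass paths from the same tree.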
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
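    # Illustrative examples of the normalisation above (editorial):
    #   _validate_darwin_versions(1)            -> ['1', '1']
    #   _validate_darwin_versions('1.2')        -> ['1.2', '1.2']
    #   _validate_darwin_versions([1, '2.3.4']) -> ['1', '2.3.4']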
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
        whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
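    # Illustrative sketch (editorial, names hypothetical): a nested command
    # such as ['@INPUT@', some_exe, ['--flag', generated_file]] is flattened
    # left to right; File entries and found external programs (whose
    # absolute path is known) are also recorded in self.depend_files, and
    # build targets land in self.dependencies.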
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
            if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
            return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
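    # Illustrative example (editorial): for sources ['main.c', a File
    # wrapping 'x.c', and a CustomTarget with outputs ['gen.h', 'gen.c']],
    # the result is ['main.c', 'x.c', 'gen.h', 'gen.c'].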
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
get_langs_used_by_deps
|
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
        # build. When we're doing a unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
        # extract all of its sources or none of them.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
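    # A hedged sketch with hypothetical sources: in a unity build all C
    # sources of a target are compiled as one unified unit, so
    #   target.extract_objects(['a.c'])
    # raises when the target also contains 'b.c'; only extracting every C
    # source (e.g. via extract_all_objects()) is compatible with unity mode.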
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
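    # A minimal usage sketch (hypothetical values; the interpreter normally
    # populates self.envvars). Entries are (method, name, values, kwargs):
    #
    #   ev = EnvironmentVariables()
    #   ev.envvars.append((ev.append, 'PATH', ['/opt/bin'], {}))
    #   ev.get_env({'PATH': '/usr/bin'})
    #   # -> {'PATH': '/usr/bin' + os.pathsep + '/opt/bin'}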
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
        # A small-digest hash function with a low chance of collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
        # e.g. it must not start with a '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
            # preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
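    # Illustrative IDs (the 7-character hash prefix below is made up):
    #   construct_id_from_path('', 'foo', '@exe')        -> 'foo@exe'
    #   construct_id_from_path('sub/dir', 'foo', '@exe') -> '1a2b3c4@@foo@exe'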
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
                raise InvalidArguments('Overrides must be of the form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
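    # For example, override_options=['b_ndebug=true', ' unity = on '] parses
    # to {'b_ndebug': 'true', 'unity': 'on'} (keys and values are stripped).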
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
        # Note: test link_whole_targets (the list), not the link_whole
        # method, which as a bound method is always truthy.
        if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
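    # A hedged illustration: for sources ['main.c', 'util.cpp'] with C and
    # C++ compilers configured, self.compilers ends up re-sorted by
    # clink_langs, e.g. OrderedDict([('c', <CCompiler>), ('cpp', <CPPCompiler>)]).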
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
        Targets. Notably it doesn't handle generator() returned objects, because
        adding them as a link dependency would inherently cause them to be
        generated twice: the output would need to be passed both to the linker
        arguments and to link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
        # This sucks quite badly. Arguments
        # are holders but they can't be pickled,
        # so unpack the ones we know about.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
            if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
                                       (self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
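    # Usage sketch (hypothetical paths; the files must exist in the source
    # tree):
    #   self.add_pch('c', ['pch/foo_pch.h'])
    #   self.add_pch('cpp', ['pch/foo_pch.cc', 'pch/foo_pch.h'])
    # A source-first pair is reordered, so self.pch['cpp'] stores the header
    # first: ['pch/foo_pch.h', 'pch/foo_pch.cc'].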
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
# MASKED: get_langs_used_by_deps function (lines 1126-1148)
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
        Warn if shared modules are linked with the target via link_with (issue #2865).
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
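    # A small illustration with hypothetical values: for self.outputs
    # ['@BASENAME@.c', '@BASENAME@.h'] and inname 'sub/foo.vala',
    # get_base_outnames() returns ['foo.c', 'foo.h'].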
def is_parent_path(self, parent, trial):
        try:  # relative_to() raises ValueError when trial is not below parent
            pathlib.PurePath(trial).relative_to(parent)
            return True
        except ValueError:
            return False
    def process_files(self, name, files, state, preserve_path_from=None, extra_args=None):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
    def __init__(self, generator, subdir, preserve_path_from=None, extra_args=None):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
        self.extra_args = extra_args if extra_args is not None else []
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
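    # Sketch with hypothetical paths: with preserve_path_from='/abs/src', an
    # input '/abs/src/sub/foo.c' whose generator outputs 'foo.out' is mapped
    # to 'sub/foo.out', keeping the subdirectory structure.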
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
        # Unless overridden, executables have no suffix or prefix, except on
        # Windows and with C#/Mono executables, where the suffix is 'exe'.
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
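    # A hedged illustration of the resulting filenames (values hypothetical,
    # target name 'foo'):
    #   Linux, ltversion '1.2.3'  -> 'libfoo.so.1.2.3'
    #   macOS, soversion '1'      -> 'libfoo.1.dylib'
    #   MSVC                      -> 'foo.dll' plus import library 'foo.lib'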
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
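    # For instance:
    #   _validate_darwin_versions(2)          -> ['2', '2']
    #   _validate_darwin_versions('1.2')      -> ['1.2', '1.2']
    #   _validate_darwin_versions([1, '2.3']) -> ['1', '2.3']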
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
                # library version is defined, get the soversion from that.
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
        whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
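# Editor's illustrative sketch (hypothetical targets, not from upstream):
# if custom target A's command runs custom target B, and B's sources
# include build target C, then A.get_transitive_build_target_deps()
# returns {C}, found by recursing through B.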
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
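# Editor's illustrative sketch (hypothetical command, not from upstream):
# flatten_command(['gen.py', some_file, some_exe]) keeps the string and
# the File in the result while recording the File in depend_files and
# the executable in dependencies; a found ExternalProgram is expanded
# into its full command list instead.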
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
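# Editor's illustrative sketch (assumed single input, not from upstream):
# for inputs == ['foo.c'], values maps '@PLAINNAME@' to 'foo.c' and
# '@BASENAME@' to 'foo', so an output of '@BASENAME@.o' is substituted
# to 'foo.o' below.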
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
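# Editor's illustrative sketch (assumed values, not from upstream): with
# depfile == '@BASENAME@.d' and infilenames == ['src/foo.c'],
# get_dep_outname() returns 'foo.d'.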
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
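# Editor's illustrative sketch (hypothetical target, not from upstream):
# if ct is a CustomTarget with outputs ['a.h', 'b.h'], then ct[1] is a
# CustomTargetIndex whose get_outputs() is ['b.h'], while backends still
# depend on ct as a whole.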
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
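# Editor's illustrative sketch (hypothetical File f, not from upstream):
# Data(sources=[f], install_dir='share/doc', rename=['README.first'])
# installs the single source under the name 'README.first'; with
# rename=None the original basename is kept.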
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
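# Editor's illustrative sketch (hypothetical inputs, not from upstream):
# for sources of ['main.c', a File for 'util.c', a custom target], the
# result is ['main.c', 'util.c'] plus the custom target's output names.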
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a Unified build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
# extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
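# Editor's illustrative sketch (hypothetical unity target, not from
# upstream): if a unity target compiles a.c and b.c with the C compiler,
# extracting only a.c fails this check; extracting both a.c and b.c
# (every C source at once) passes.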
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
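# Editor's illustrative sketch (assumed values, not from upstream): with
# an envvars entry of (append, 'PATH', ['/opt/bin'], {}) and a full_env
# of {'PATH': '/usr/bin'}, get_env() yields
# {'PATH': '/usr/bin' + os.pathsep + '/opt/bin'}.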
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported; it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
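# Editor's illustrative sketch (assumed values, not from upstream): with
# a default dir of 'lib' and a custom install_dir of ['mydir', False],
# get_install_dir() returns (['mydir', False], True): the first output
# goes to 'mydir' and the second is not installed.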
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
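# Editor's illustrative sketch (assumed values, not from upstream):
# construct_id_from_path('sub/dir', 'foo', '@exe') yields
# '<hash>@@foo@exe', where '<hash>' is the first seven hex digits of
# sha256('sub/dir').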
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
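# Editor's illustrative sketch (assumed values, not from upstream): an
# override_options of ['warning_level=3', 'b_ndebug = true'] parses to
# {'warning_level': '3', 'b_ndebug': 'true'} after stripping whitespace.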
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly: the arguments
# are holders, but holders can't be pickled,
# so unpack the known ones.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
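# Editor's illustrative sketch (hypothetical paths, not from upstream):
# add_pch('cpp', ['pch/pch.h']) stores a header-only PCH, while
# add_pch('cpp', ['pch/pch.cc', 'pch/pch.h']) is reordered so the header
# comes first; both files must exist and share one directory.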
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
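# Editor's illustrative sketch (hypothetical targets, not from upstream):
# a pure-C executable that uses link_with on a C++ static library picks
# up 'cpp' here even though none of its own sources are C++.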
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
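# Editor's illustrative sketch (continuing the C-plus-C++ case above,
# not from upstream): 'cpp' outranks 'c' in clink_langs, so the C++
# compiler is chosen as the linker and stdlib_args collects the
# stdlib-only link flags of the remaining languages (often empty for C).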
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
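# Editor's illustrative sketch (assumed values, not from upstream): with
# outputs == ['@BASENAME@.c', '@BASENAME@.h'],
# get_base_outnames('idl/foo.idl') returns ['foo.c', 'foo.h'].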
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
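# Illustrative sketch (added for clarity, not in the original source): how
# the @BASENAME@/@PLAINNAME@ substitution above behaves, assuming a
# hypothetical `prog` executable:
#
#   gen = Generator([prog], {'arguments': ['@INPUT@', '-o', '@OUTPUT@'],
#                            'output': ['@BASENAME@.c', '@BASENAME@.h']})
#   gen.get_base_outnames('src/foo.vala')  # -> ['foo.c', 'foo.h']
#   gen.get_arglist('src/foo.vala')        # -> ['@INPUT@', '-o', '@OUTPUT@']
#
# @INPUT@ and @OUTPUT@ are left alone here; they are substituted later by
# the backend.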
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files, not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
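# preserve_path_from sketch (illustrative, hypothetical paths): with
# preserve_path_from='/src/proj' and input '/src/proj/sub/foo.c',
# add_preserved_path_segment() computes path_segment='sub', so a base
# output name 'foo.o' is stored as 'sub/foo.o' in outfilelist, keeping
# the input's directory layout under the generator's output directory.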
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
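# Example (illustrative): an executable 'foo' built with export_dynamic
# enabled gets implib_basename 'foo.exe' by default, so on MinGW the
# import library is 'libfoo.exe.a' (gcc_import_filename), while with
# MSVC it would be 'foo.exe.lib' (vs_import_filename).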
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
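# Normalization examples (illustrative):
#   _validate_darwin_versions(2)          -> ['2', '2']
#   _validate_darwin_versions('1.2.3')    -> ['1.2.3', '1.2.3']
#   _validate_darwin_versions([7, '8.0']) -> ['7', '8.0']
#   _validate_darwin_versions('1.2.3.4')  raises InvalidArguments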
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
supports_versioning = not for_android(self.is_cross, self.environment)
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
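# Sketch (illustrative): if custom target ct1 depends on custom target
# ct2, and ct2's command runs build target libfoo, then
# ct1.get_transitive_build_target_deps() returns {libfoo}; recursion
# stops at build targets and only descends through custom targets.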
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
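# flatten_command() sketch (hypothetical values): given
#   cmd = [my_exe_target, '--opt', some_file, ['nested', 'arg']]
# the return value is
#   [my_exe_target, '--opt', some_file, 'nested', 'arg']
# and, as side effects, my_exe_target is appended to self.dependencies
# and some_file to self.depend_files.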
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library), got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get the depfile name for a custom_target that does not have a depfile defined.')
if infilenames:
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
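# Usage sketch (illustrative): for a CustomTarget ct with outputs
# ['gen.c', 'gen.h'], ct[1] yields a CustomTargetIndex that contributes
# only 'gen.h' as a source while still making consumers depend on all
# of ct in the backends.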
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
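# Example (illustrative): for a mixed list,
#   get_sources_string_names(['a.c', file_obj, some_target])
# returns ['a.c', file_obj.fname] plus some_target.get_outputs().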
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
get_clink_dynamic_linker_and_stdlibs
|
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
# extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files cannot be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
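# Unity-compatibility sketch (illustrative): if a target's C sources
# are {a.c, b.c} and its C++ sources are {x.cpp}, then
# extract_objects(['a.c']) fails the check above (a proper subset of
# the C group), while extracting ['a.c', 'b.c'] or ['x.cpp'] is fine.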
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
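# Semantics sketch (illustrative, PATH-style values): with
# full_env = {'PATH': '/usr/bin'} and an entry recorded as
#   (self.append, 'PATH', ['/opt/bin'], {})
# in self.envvars, get_env() produces
#   {'PATH': '/usr/bin' + os.pathsep + '/opt/bin'}
# prepend() would put '/opt/bin' in front instead.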
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# Preserve my_id for better debuggability.
return subdir_part + '@@' + my_id
return my_id
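# Example IDs (illustrative):
#   construct_id_from_path('', 'foo', '@exe')        -> 'foo@exe'
#   construct_id_from_path('sub/dir', 'foo', '@sha') -> '<7-char hash>@@foo@sha'
# A '/' or '\' in the name itself is mangled first: 'a/b' -> 'a@b'.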
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
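# Example (illustrative):
#   self.parse_overrides({'override_options': ['buildtype=release', 'b_lto=true']})
#   -> {'buildtype': 'release', 'b_lto': 'true'}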
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
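# Worked example (illustrative): a target with 'main.cpp' and 'util.c'
# ends up with both 'cpp' and 'c' in self.compilers; assuming
# clink_langs orders 'cpp' before 'c', the re-sort above puts 'cpp'
# first, so the C++ compiler is later chosen as the dynamic linker,
# which is what linking mixed C/C++ objects requires.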
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
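# Sketch (illustrative): a string such as 'version.map' becomes
# File.from_source_file(...) rooted in the source tree, a File object
# is kept as-is, and anything with get_outputs() (e.g. a CustomTarget)
# contributes File.from_built_file(...) for each of its outputs.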
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
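# Illustrative: a pkg-config file generator would call
# get_dependencies(internal=False) so that libraries merged in via
# link_whole do not leak into the generated `Libs:` line.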
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports a C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same applies to objc/objc++, etc., so we can
keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
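
For example (illustrative): a pure-C executable that links with an
internal C++ static library gets 'cpp' in the returned list, so the
final link is done with the C++ compiler and libstdc++ is pulled in.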
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
# MASKED: get_clink_dynamic_linker_and_stdlibs function (lines 1150-1187)
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
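# Illustrative example (hypothetical values): with self.outputs set to
# ['@BASENAME@.h', '@PLAINNAME@.xml'] and inname 'src/foo.c',
# get_base_outnames() returns ['foo.h', 'foo.c.xml'].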
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
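# Note: PurePath.relative_to() raises ValueError when trial is not
# located under parent, so a completely unrelated path surfaces as an
# exception here rather than a False return.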
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
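
For example (illustrative): shared_library('foo', ..., version: '1.2.3')
yields libfoo.so.1.2.3 on Linux, libfoo.1.dylib on macOS, foo-1.dll with
import library foo.lib when building with MSVC, and libfoo-1.dll with
import library libfoo.dll.a with MinGW/GCC.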
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
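# darwin_versions accepts (illustrative examples) an integer such as 2,
# a string such as '2.3.4', or a list of at most two such values; the
# validator below normalizes all of these into a two-element list of
# strings, [compatibility_version, current_version].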
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not(isinstance(i, str)):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
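# Illustrative: with input ['data.json'] and output ['@BASENAME@.c'],
# self.outputs becomes ['data.c'] after substitution.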
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library); got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
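# Minimal usage sketch (assuming an already-configured build tree at
# 'builddir'):
#
#     from mesonbuild import build
#     b = build.load('builddir')  # reads meson-private/build.dat
#     print(sorted(b.targets))
#
# save() writes the same pickle back and is the inverse of load().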
|
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
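# Illustrative sketch (hypothetical language set): for a target with
# self.compilers = {'c': cc, 'cpp': cpp} and no dependency languages,
# iterating clink_langs (where 'cpp' sorts before 'c') selects the C++
# compiler as the linker; stdlib_args then collects
# language_stdlib_only_link_flags() for the remaining non-linker
# languages, which is typically empty for plain C.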
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
| 1,150
| 1,187
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
        self.searched_programs = set() # The set of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
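# Illustrative sketch, not part of Meson: Build.copy() duplicates container
# attributes one level deep, so list mutations on the copy do not leak back,
# while 'compilers'/'cross_compilers' stay aliased to coredata on purpose.
# The _Fake* classes are hypothetical stand-ins for the real environment.
class _FakeCoreData:
    def __init__(self):
        self.compilers = {}
        self.cross_compilers = {}
class _FakeEnvironment:
    def __init__(self):
        self.coredata = _FakeCoreData()
def _example_build_copy():
    original = Build(_FakeEnvironment())
    duplicate = original.copy()
    duplicate.tests.append('new-test')
    assert original.tests == [] and duplicate.tests == ['new-test']
    assert duplicate.compilers is original.compilers  # deliberately shared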
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
        # build. When we're doing a unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
        # extract all of its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
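# Illustrative sketch, not part of Meson: the unity constraint above in the
# abstract. For every compiler, the extracted source set must equal the
# full source set; partial extraction is rejected.
def _example_unity_rule():
    cmpsrcs = {'c': {'a.c', 'b.c'}}
    extracted_cmpsrcs = {'c': {'a.c'}}
    compatible = all(extracted_cmpsrcs[comp] == cmpsrcs[comp] for comp in extracted_cmpsrcs)
    assert not compatible  # extracting only a.c would fail in a unity build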
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
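# Illustrative sketch, not part of Meson proper: the interpreter normally
# populates self.envvars; here (method, name, values, kwargs) tuples are
# appended by hand to show how set/append/prepend compose over a base env.
def _example_environment_variables():
    ev = EnvironmentVariables()
    ev.envvars.append((ev.set, 'FOO', ['bar'], {}))
    ev.envvars.append((ev.append, 'PATH', ['/opt/bin'], {'separator': ':'}))
    env = ev.get_env({'PATH': '/usr/bin'})
    assert env['FOO'] == 'bar'
    assert env['PATH'] == '/usr/bin:/opt/bin'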
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported; it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
            # Preserve my_id for better debuggability.
return subdir_part + '@@' + my_id
return my_id
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
def is_linkable_target(self):
return False
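# Illustrative sketch, not part of Meson: target IDs must be valid file
# names on every OS, so the subdir is collapsed to a 7-character sha256
# digest and path separators in the name become '@'.
def _example_target_ids():
    assert Target.construct_id_from_path('', 'foo', '@exe') == 'foo@exe'
    hashed = Target.construct_id_from_path('sub/dir', 'foo', '@exe')
    assert hashed.endswith('@@foo@exe') and len(hashed.split('@@')[0]) == 7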
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
        # The list of all files output by this target. Useful in cases such
        # as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
        assert isinstance(objects, list)
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
                assert len(self.compilers) == 1
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
            assert isinstance(i, File)
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
            if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
                                       (self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
            if os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1]):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
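# Illustrative sketch, not part of Meson: the priority pick performed by
# get_clink_dynamic_linker_and_stdlibs() in the abstract. 'priority' and
# 'target_langs' are hypothetical stand-ins for clink_langs and the union
# of self.compilers with the dependency languages.
def _example_linker_priority():
    priority = ['d', 'cpp', 'c']   # simplified clink_langs ordering
    target_langs = {'c', 'cpp'}    # e.g. mixed C and C++ sources
    picked = next(l for l in priority if l in target_langs)
    assert picked == 'cpp'         # the C++ linker wins over the C linker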
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
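# Illustrative sketch, not part of Meson: the substitution performed per
# input file by Generator.get_base_outnames(); '@BASENAME@' drops the
# extension while '@PLAINNAME@' keeps it.
def _example_output_template():
    plainname = os.path.basename('src/parser.y')     # 'parser.y'
    basename = os.path.splitext(plainname)[0]        # 'parser'
    out = '@BASENAME@.c'.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
    assert out == 'parser.c'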
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
        assert os.path.isabs(self.preserve_path_from)
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
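# Illustrative sketch, not part of Meson: with preserve_path_from, the
# input's directory relative to that root is prepended to each output
# (see GeneratedList.add_preserved_path_segment). Paths are hypothetical.
def _example_preserved_path_segment():
    in_abs = os.path.join(os.sep, 'src', 'proto', 'net', 'msg.proto')
    rel = os.path.relpath(in_abs, os.path.join(os.sep, 'src', 'proto'))
    assert os.path.dirname(rel) == 'net'  # prepended to every output name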
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
        The name of the import library that will be output by the compiler.
        Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
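# A minimal sketch of the template mechanics above: the templates are plain
# str.format() strings that read attributes off the target through the
# positional field {0}. With a stand-in object carrying the same attributes,
# the versioned Linux case formats like this:
#
#   from types import SimpleNamespace
#   tgt = SimpleNamespace(prefix='lib', name='foo', suffix='so',
#                         ltversion='0.100.0')
#   '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'.format(tgt)  # 'libfoo.so.0.100.0'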
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
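# Worked examples of the normalization above:
#
#   SharedLibrary._validate_darwin_versions(7)             # ['7', '7']
#   SharedLibrary._validate_darwin_versions('7')           # ['7', '7']
#   SharedLibrary._validate_darwin_versions(['7'])         # ['7', '7']
#   SharedLibrary._validate_darwin_versions(['7', '7.1'])  # ['7', '7.1']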
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
supports_versioning = not for_android(self.is_cross, self.environment)
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that the compiler will output.
Returns None if no import library is required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
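# Worked example of the aliasing above for a Linux target with prefix
# 'lib', name 'foo', suffix 'so', ltversion '0.100.0' and soversion '0'
# (so the real file is libfoo.so.0.100.0):
#
#   get_aliases() == {'libfoo.so.0': 'libfoo.so.0.100.0',
#                     'libfoo.so': 'libfoo.so.0'}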
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
For example, if you have a Python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
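# Shape of the recursion above: if custom target ct1 depends on custom
# target ct2, and ct2 depends on build targets lib_a and lib_b, then
# ct1.get_transitive_build_target_deps() == {lib_a, lib_b}. Only
# BuildTargets are collected; only CustomTargets are recursed into.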
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop=False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
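# A hedged sketch of the output-name substitution performed above, reusing
# the mesonlib helpers imported by this module (the input path is made up):
#
#   values = get_filenames_templates_dict(['src/hello.c'], [])
#   substitute_values(['@BASENAME@.o'], values)   # ['hello.o']
#   substitute_values(['@PLAINNAME@.d'], values)  # ['hello.c.d']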
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
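# Example of the depfile substitution above: with depfile '@BASENAME@.d'
# and a first input of 'subdir/foo.c', this returns 'foo.d'; with
# '@PLAINNAME@.d' it returns 'foo.c.d'.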
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
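# Illustration: with link targets producing 'a/one.jar' and 'b/two.jar',
# this returns ['-cp', 'a/one.jar' + os.pathsep + 'b/two.jar'], i.e.
# ['-cp', 'a/one.jar:b/two.jar'] on POSIX.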
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = listify(sources)
self.install_dir = install_dir
self.install_mode = install_mode
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
get_using_msvc
|
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
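A hedged sketch of what this check could look like (an assumption for
illustration, not the recorded implementation): fetch the dynamic linker
chosen via get_clink_dynamic_linker_and_stdlibs() and compare its id:
    linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
    return linker.get_id() == 'msvc'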
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
# extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
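# Example of the constraint enforced above: if a unity target compiles
# {a.c, b.c} with the C compiler, extract_objects(['a.c']) raises
# (a partial extraction of the C sources), while
# extract_objects(['a.c', 'b.c']) is fine: per compiler it is all
# sources or none.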
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
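# A hedged usage sketch: self.envvars holds (method, name, values, kwargs)
# tuples, so appending to PATH could look like this (the exact public API
# for filling envvars is not shown in this file):
#
#   ev = EnvironmentVariables()
#   ev.envvars.append((ev.append, 'PATH', ['/opt/tools/bin'], {}))
#   ev.get_env({'PATH': '/usr/bin'})
#   # {'PATH': '/usr/bin' + os.pathsep + '/opt/tools/bin'}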
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# Preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
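# Worked example (the hash digest is illustrative, not computed):
#
#   Target.construct_id_from_path('', 'foo', '@exe')         # 'foo@exe'
#   Target.construct_id_from_path('sub/dir', 'foo', '@exe')
#   # '<first 7 hex chars of sha256("sub/dir")>@@foo@exe', e.g. '1f2a3b4@@foo@exe'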
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
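# Example: override_options=['unity=on', ' warning_level = 3 '] parses to
# {'unity': 'on', 'warning_level': '3'}; an entry without '=' raises
# InvalidArguments.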
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
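# Merge-order note for the loop above: result = {**mapping, **result} in
# spirit, so entries already accumulated in `result` (from targets linked
# earlier) win on key collisions.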
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
            if os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1]):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
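    # Usage sketch (hypothetical paths): add_pch('cpp', ['pch/foo.cc', 'pch/foo.h'])
    # reorders the pair to [header, source] before storing it; mixed
    # directories, missing files, or more than two entries raise.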
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
                raise InvalidArguments('Compiler args must be strings or File objects, not {!r}.'.format(a))
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
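    # Example of the case described in the docstring: a pure-C target
    # linking an internal C++ static library gets 'cpp' in the returned
    # list, which later steers linker selection toward a C++ compiler.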
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
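    # Selection sketch for the docstring above (assuming clink_langs
    # ranks 'cpp' ahead of 'c'): a target compiled only with C that
    # links a C++ library has dep_langs == ['cpp'], so the C++ compiler
    # is picked as the linker, and the C stdlib-only link flags (usually
    # empty) are appended via language_stdlib_only_link_flags().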
# MASKED: get_using_msvc function (lines 1189-1211)
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
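    # Example (hypothetical generator with output ['@BASENAME@.c']):
    # get_base_outnames('sub/foo.vala') -> ['foo.c']; a '@PLAINNAME@.c'
    # template would instead yield ['foo.vala.c'].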
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
                raise InvalidArguments('{} arguments must be strings or files, not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
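    # Worked example (hypothetical target): a native SharedLibrary named
    # 'foo' on Linux with soversion '1' and no ltversion takes the final
    # branch, so filename_tpl is '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
    # and self.filename formats to 'libfoo.so.1'.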
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
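    # Behaviour sketch: _validate_darwin_versions(1) -> ['1', '1'] and
    # _validate_darwin_versions(['1.2', 7]) -> ['1.2', '7'], while any
    # value outside the X[.Y[.Z]] pattern raises InvalidArguments.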
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
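    # Concrete instance of the docstring example: with filename
    # 'libfoo.so.0.100.0', ltversion '0.100.0' and soversion '0', this
    # returns {'libfoo.so.0': 'libfoo.so.0.100.0', 'libfoo.so': 'libfoo.so.0'}.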
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
        whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
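    # Flattening sketch (hypothetical command): ['gen.py', prog, [infile]]
    # keeps the string as-is, expands the ExternalProgram into its
    # command list (recording a file dependency when its path is
    # absolute), and recurses into the nested list, producing one flat argv.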
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
            if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
                raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library), got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
            return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
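    # Example consequence (hypothetical target): when this returns True
    # on Windows, SharedLibrary.determine_filenames emits 'foo.dll' with
    # a 'foo.lib' import library instead of the GCC-style 'libfoo.dll.a'.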
| 1,189
| 1,211
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a Unified build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
        # extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
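    # Usage sketch (envvars entries are recorded elsewhere as
    # (method, name, values, kwargs) tuples): after appending '/opt/bin'
    # to PATH, get_env({'PATH': '/usr/bin'}) returns a copy where PATH is
    # '/usr/bin' + os.pathsep + '/opt/bin'.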
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
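    # Example: if the first output's install_dir is set to false, outdirs[0]
    # stays False and custom_install_dir is True, signalling the caller to
    # skip installing that particular output.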
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
            # preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
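    # Illustrative IDs (digest value is fake): construct_id_from_path(
    # 'sub/dir', 'foo', '@exe') -> '1a2b3c4@@foo@exe', while a top-level
    # target ('' subdir) collapses to plain 'foo@exe'.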
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
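    # Example: override_options: ['unity=off', ' b_lto = true '] parses to
    # {'unity': 'off', 'b_lto': 'true'}; entries without '=' raise
    # InvalidArguments.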
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
        # NB: self.link_whole is a method and thus always truthy; the list of
        # whole-linked targets is self.link_whole_targets.
        if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
                # Don't add Vala sources: they would pull in the Vala compiler
                # even though we never use it here, because we are dealing
                # with already-compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
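        # Recursively replace interpreter holder objects with their held_object
        # payloads, including inside nested lists and known list-valued attributes.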
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
        # This sucks quite badly: arguments are holder objects that can't be
        # pickled, so unpack the known ones into their held values.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # Merge the two dictionaries, keeping entries from the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
            if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
                                       (self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
            if os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1]):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
                raise InvalidArguments('Compiler arguments must be strings or File objects.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
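        # Illustrative example (not executed): with self.outputs == ['@BASENAME@.c']
        # and inname 'foo.vala', this returns ['foo.c']; '@PLAINNAME@' would
        # substitute the full 'foo.vala' instead.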
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
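        # Illustrative example (not executed): with depfile '@BASENAME@.d' and
        # inname 'foo.vala', this returns 'foo.d'.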
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
        try:
            relpath = pathlib.PurePath(trial).relative_to(parent)
        except ValueError:
            # relative_to() raises when trial is not under parent at all.
            return False
        return relpath.parts[0] != '..'  # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
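        # Illustrative normalizations (not executed):
        #   1          -> ['1', '1']
        #   '1.2'      -> ['1.2', '1.2']
        #   ['1', 2]   -> ['1', '2']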
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
                    raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
                                           'where Y is [0, 255] and Z is optional')
if len(parts) == 3 and int(parts[2]) > 255:
                    raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
                                           'where Z is [0, 255]')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
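        # Illustrative example (not executed): a command such as
        # [prog, 'arg', [file_obj]] flattens to prog.get_command() + ['arg', file_obj],
        # recording file_obj (and prog, if its path is absolute) as dependencies.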
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
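        # Illustrative example (not executed): with input ['data.txt'] and
        # output ['@BASENAME@.h'], the substituted output below is ['data.h'].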
for i in self.outputs:
            if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library); got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a Java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
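# Minimal usage sketch for get_sources_string_names: plain strings pass
# through unchanged, while targets contribute their get_outputs() names.
#
#     >>> get_sources_string_names(['foo.c', 'bar.c'])
#     ['foo.c', 'bar.c']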
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
determine_filenames
|
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
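For example (an illustrative sketch, not values taken from this code):
a Linux shared library target 'foo' with version '1.2.3' and soversion
'1' is typically laid out as
    filename: libfoo.so.1.2.3
    aliases:  libfoo.so.1 -> libfoo.so.1.2.3
              libfoo.so   -> libfoo.so.1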
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The set of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
# extract all of its sources or none of them.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files cannot be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
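# Example of the rule above (hypothetical sources): if the target's C
# sources are {a.c, b.c}, then extract_objects(['a.c']) is rejected in a
# unity build, because only the full per-compiler set {a.c, b.c} can be
# extracted at once.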
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
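# Usage sketch for EnvironmentVariables above (hypothetical; the
# interpreter normally fills self.envvars with (method, name, values,
# kwargs) tuples):
#
#     ev = EnvironmentVariables()
#     ev.envvars.append((ev.prepend, 'PATH', ['/opt/bin'], {}))
#     ev.get_env({'PATH': '/usr/bin'})
#     # -> {'PATH': '/opt/bin' + os.pathsep + '/usr/bin'}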
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported; it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# Keep the readable my_id part for easier debugging.
return subdir_part + '@@' + my_id
return my_id
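# Usage sketch: construct_id_from_path('', 'foo', '@exe') returns
# 'foo@exe'; with subdir 'sub/dir' the result is prefixed with the
# 7-character subdir hash, i.e. '<hash>@@foo@exe'.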
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
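# Example for parse_overrides above: override_options of
# ['unity=on', ' werror = true '] parses to
# {'unity': 'on', 'werror': 'true'} (keys and values are stripped).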
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
# self.link_whole is a method (always truthy); test the attribute.
if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments are holders, but holders can't be
# pickled, so unpack the ones we know about.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
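# Merge sketch: with result = {'liba.so': x} and a dependency mapping of
# {'liba.so': y, 'libb.so': z}, result_tmp.update(result) yields
# {'liba.so': x, 'libb.so': z} -- entries found earlier stay dominant.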
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross-built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross-built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
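# For example, a target with both C and C++ sources is linked with the
# C++ compiler, because 'cpp' outranks 'c' in clink_langs and the C++
# standard library must be linked in.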
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with this target (link_with); see issue #2865.
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
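# e.g. with self.outputs = ['@BASENAME@.c', '@PLAINNAME@.h'] and inname
# 'src/foo.txt', this returns ['foo.c', 'foo.txt.h'].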
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
# For subdirs we can only go "down": paths outside parent get a '..' prefix.
return not os.path.relpath(trial, parent).startswith('..')
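# e.g. is_parent_path('/srv/src', '/srv/src/sub/a.c') -> True, while
# is_parent_path('/srv/src', '/etc/passwd') -> False.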
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
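# Worked example (illustrative, derived from the logic above): an
# executable named 'prog' built for Windows with export_dynamic enabled
# ends up with:
#   filename            -> 'prog.exe'
#   vs_import_filename  -> 'prog.exe.lib'  (implib_basename defaults to 'prog.exe')
#   gcc_import_filename -> 'libprog.exe.a'
# import_filename then points at whichever of the two the toolchain uses.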
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
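# Naming sketch (illustrative): static_library('foo', ...) yields
# 'libfoo.a' for C-family compilers on every platform, while a Rust
# target defaults to the rlib crate type and yields 'libfoo.rlib'.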
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
# MASKED: determine_filenames function (lines 1569-1666)
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
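# Normalization sketch (illustrative inputs):
#   _validate_darwin_versions(2)           -> ['2', '2']
#   _validate_darwin_versions('1.2')       -> ['1.2', '1.2']
#   _validate_darwin_versions([1, '2.3'])  -> ['1', '2.3']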
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
For example, if you have a Python script that loads a C module that
links to other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
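# Flattening sketch (hypothetical inputs): a command such as
#   ['gen.py', some_file, some_target, ['--flag', 'x']]
# flattens to
#   ['gen.py', some_file, some_target, '--flag', 'x']
# while File arguments are also recorded in self.depend_files, build
# targets in self.dependencies, and a found ExternalProgram is replaced
# by its full command line.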
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
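# Substitution sketch (assumed values): with input: 'foo.idl' and
# output: ['@BASENAME@.c', '@PLAINNAME@.h'], the outputs resolve to
# ['foo.c', 'foo.idl.h'] via get_filenames_templates_dict() and
# substitute_values().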
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library), got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
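# Depfile sketch (illustrative): depfile='@BASENAME@.d' with the single
# input 'sub/foo.c' yields 'foo.d'; with no inputs, templates are
# rejected and the depfile name is returned verbatim.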
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
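# Classpath sketch (illustrative): two linked jars at 'a/x.jar' and
# 'b/y.jar' produce ['-cp', 'a/x.jar:b/y.jar'] on POSIX, where
# os.pathsep is ':' (';' on Windows).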
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
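# Name-extraction sketch (hypothetical inputs):
#   get_sources_string_names(['main.c', some_file, gen_target])
#   -> ['main.c', some_file.fname] + gen_target.get_outputs()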
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
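# Round-trip sketch (illustrative): the backend persists the Build object
# with save() and later reloads it from the build directory:
#   >>> save(build, os.path.join(build_dir, 'meson-private', 'build.dat'))
#   >>> build = load(build_dir)  # unpickles and type-checks the object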
|
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
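# Worked example (illustrative): shared_library('foo', ..., version: '1.2.3')
# on Linux deduces soversion '1' and resolves to:
#   filename_tpl -> '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
#   filename     -> 'libfoo.so.1.2.3'
# get_aliases() then maps 'libfoo.so.1' -> 'libfoo.so.1.2.3' and
# 'libfoo.so' -> 'libfoo.so.1'.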
| 1569
| 1666
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a Unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
# extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
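# Usage sketch (illustrative; entries are assumed to be appended to
# self.envvars elsewhere, by the interpreter holder, as bound methods):
#   env = EnvironmentVariables()
#   env.envvars.append((env.append, 'PATH', ['/opt/bin'], {}))
#   env.get_env({'PATH': '/usr/bin'})
#   -> {'PATH': '/usr/bin' + os.pathsep + '/opt/bin'}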
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve myid for better debuggability
return subdir_part + '@@' + my_id
return my_id
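# ID sketch (hash prefix abbreviated, output not verified):
#   >>> Target.construct_id_from_path('', 'foo', '@exe')
#   'foo@exe'
#   >>> Target.construct_id_from_path('sub/dir', 'foo', '@exe')
#   '<sha256("sub/dir")[:7]>@@foo@exe'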
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
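# Parsing sketch (illustrative): override_options: ['c_std=c99', ' warning_level = 2 ']
# -> {'c_std': 'c99', 'warning_level': '2'} (keys and values are stripped).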
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled,
# so unpack the known ones.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert isinstance(i, File)
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform, pass `[]` (empty array).')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('Compiler arguments must be strings or File objects.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
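# Base build targets have no aliased filenames; SharedLibrary overrides this
# to provide the versioned .so/.dylib aliases.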
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same also applies to objc/objc++, etc.,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
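# e.g. for inname 'src/foo.in' and outputs ['@BASENAME@.c'], returns ['foo.c'].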
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
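# e.g. depfile '@BASENAME@.d' with inname 'src/foo.c' yields 'foo.d'.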
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
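# Substitute @BASENAME@ and @PLAINNAME@ from the given input into the stored argument list.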
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
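# Note: PurePath.relative_to raises ValueError when trial is not under parent,
# so the '..' check below is a defensive extra.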
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
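# Prefix each output with the input's directory relative to preserve_path_from,
# e.g. with preserve_path_from=<root> and input <root>/sub/foo.in the outputs keep 'sub/'.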
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert os.path.isabs(self.preserve_path_from)
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be emitted by the compiler.
Returns None if no import library is required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
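# Normalizes to a 2-element list of version strings,
# e.g. 2 -> ['2', '2'] and ['1', '2.3'] stays ['1', '2.3'].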
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be emitted by the compiler.
Returns None if no import library is required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
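# Sort custom targets by their unique id for deterministic ordering.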
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
e.g. if you have a Python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
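# Flatten a command array, recording file and target dependencies as a side
# effect; the result mixes strings, Files and build/custom targets.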
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library); got %s (%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
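# Build the Java classpath from the output jars of all linked jar targets.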
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert isinstance(s, File)
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert isinstance(script, list)
assert isinstance(args, list)
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
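# Usage sketch (hypothetical paths): round-trip the build state of an
# already-configured build directory 'builddir':
#   b = load('builddir')
#   save(b, os.path.join('builddir', 'meson-private', 'build.dat'))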
|
get_aliases
|
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
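# lru_cache memoizes the install name per target object.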
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
        # build. When we're doing a unity build, we go through the sources,
        # and create a single source file from each subset of the sources that
        # can be compiled with a specific compiler. Then we create one object
        # from each unified source file. So for each compiler we can either
        # extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
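    # Illustrative example (editor's sketch, not part of Meson): in a unity
    # build, all sources for one compiler are fused into a single unified
    # source, so object extraction is all-or-nothing per compiler. Given a
    # target with C sources ['a.c', 'b.c']:
    #
    #     target.extract_objects(['a.c'])         # raises MesonException
    #     target.extract_objects(['a.c', 'b.c'])  # OK: the full C subset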
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
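    # Illustrative example (editor's sketch, not part of Meson): set/append/
    # prepend entries are stored as (method, name, values, kwargs) tuples in
    # self.envvars and applied lazily by get_env:
    #
    #     ev = EnvironmentVariables()
    #     ev.envvars.append((ev.append, 'PATH', ['/opt/bin'], {}))
    #     env = ev.get_env({'PATH': '/usr/bin'})
    #     # env['PATH'] == '/usr/bin' + os.pathsep + '/opt/bin'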
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
            # preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
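    # Illustrative example (editor's sketch, not part of Meson): a target
    # 'mylib' with type suffix '@sha' gets the ID 'mylib@sha' at the top
    # level; inside a subdir, a 7-character hash of the subdir is prefixed:
    #
    #     Target.construct_id_from_path('', 'mylib', '@sha')
    #     # -> 'mylib@sha'
    #     Target.construct_id_from_path('sub/dir', 'mylib', '@sha')
    #     # -> '<7-hex-chars>@@mylib@sha'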
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
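    # Illustrative example (editor's sketch, not part of Meson): entries are
    # split on the first '=' and whitespace around key and value is stripped:
    #
    #     target.parse_overrides({'override_options': ['cpp_std = c++14']})
    #     # -> {'cpp_std': 'c++14'}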
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
        if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
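    # Illustrative usage (editor's sketch, not part of Meson; 'lib', 'a.c'
    # and 'b.c' are hypothetical): extract_objects() accepts only plain
    # string paths naming pre-existing sources of this target, while
    # extract_all_objects() also carries generated sources and objects:
    #
    #     objs = lib.extract_objects(['a.c', 'b.c'])
    #     all_objs = lib.extract_all_objects(recursive=False)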
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # We are merging two dictionaries, while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg {} not one of: {}'.format(
                    self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
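    # Illustrative example (editor's sketch, not part of Meson; the file
    # names are hypothetical): a PCH list is either a single header, or one
    # header plus one source in the same directory; the order is normalized
    # to [header, source]:
    #
    #     target.add_pch('c', ['pch/foo.h'])
    #     target.add_pch('cpp', ['pch/bar.cpp', 'pch/bar.h'])
    #     # stored as self.pch['cpp'] == ['pch/bar.h', 'pch/bar.cpp']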
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
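    # Illustrative example (editor's sketch, not part of Meson): for a target
    # with both C and C++ sources, 'cpp' outranks 'c' in clink_langs, so the
    # C++ compiler is picked as the linker, together with the stdlib link
    # flags of the remaining languages:
    #
    #     linker, stdlib_args = target.get_clink_dynamic_linker_and_stdlibs()
    #     # linker is the C++ compiler; stdlib_args holds the flags for the
    #     # other languages (often empty for plain C)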
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
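    # Illustrative example (editor's sketch, not part of Meson): with
    # output: ['@BASENAME@.c', '@PLAINNAME@.h'], an input 'dir/foo.idl'
    # yields ['foo.c', 'foo.idl.h']:
    #
    #     gen.get_base_outnames('dir/foo.idl')  # -> ['foo.c', 'foo.idl.h']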
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
        '''Human-friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
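    # Illustrative example (editor's sketch, not part of Meson): on Linux,
    # shared_library('foo', ..., version: '1.2.3', soversion: '1') produces
    # 'libfoo.so.1.2.3'; on macOS it produces 'libfoo.1.dylib'; with MSVC on
    # Windows it produces 'foo-1.dll' plus the import library 'foo.lib'.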
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
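    # Illustrative example (editor's sketch, not part of Meson): inputs are
    # normalized to a two-element list of version strings,
    # [compatibility_version, current_version]:
    #
    #     SharedLibrary._validate_darwin_versions(1)            # ['1', '1']
    #     SharedLibrary._validate_darwin_versions(['1', '2.3']) # ['1', '2.3']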
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
    def get_aliases(self):
        """
        If the versioned library name is libfoo.so.0.100.0, aliases are:
        * libfoo.so.0 (soversion) -> libfoo.so.0.100.0
        * libfoo.so (unversioned; for linking) -> libfoo.so.0
        Same for dylib:
        * libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
        """
        aliases = {}
        # Aliases are only useful with .so and .dylib libraries. Also if
        # there is no self.soversion (no versioning), we don't need aliases.
        if self.suffix not in ('so', 'dylib') or not self.soversion:
            return aliases
        # With .so libraries, the minor and micro versions are also in the
        # filename. If ltversion != soversion we need to install the
        # soversion alias: libfoo.so.0 -> libfoo.so.0.100.0, where
        # libfoo.so.0.100.0 is the actual library.
        if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
            alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
            ltversion_filename = alias_tpl.format(self)
            aliases[ltversion_filename] = self.filename
        else:
            # libfoo.so.0/libfoo.0.dylib is the actual library
            ltversion_filename = self.filename
        # Unversioned alias for linking:
        # libfoo.so -> libfoo.so.0
        # libfoo.dylib -> libfoo.0.dylib
        aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
        return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
For example, a Python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
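# Illustrative sketch (hypothetical inputs, not upstream code): given a
# found ExternalProgram 'gen' at '/usr/bin/gen' and a File 'in_txt',
# flatten_command(['--fast', gen, in_txt]) appends File('/usr/bin/gen') and
# in_txt to self.depend_files and returns
# ['--fast', '/usr/bin/gen', in_txt] (the program expands via gen.get_command()).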
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
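# Illustrative sketch (assumed values, not upstream code): with
# self.depfile = '@BASENAME@.d' and infilenames = ['src/main.c'],
# get_dep_outname() returns 'main.d' (plainname 'main.c', basename 'main').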
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', os.pathsep.join(cp_paths)]
return []
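# Illustrative sketch (hypothetical targets, not upstream code): if this jar
# links with 'a.jar' in subdir 'sub' and 'b.jar' in the top-level dir,
# get_classpath_args() returns ['-cp', 'sub/a.jar' + os.pathsep + 'b.jar']
# on POSIX (os.pathsep is ':' there, ';' on Windows).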
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
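# Illustrative sketch (hypothetical inputs, not upstream code): for
# sources = ['gen.py', File(..., fname='data.txt'), some_custom_target]
# where the custom target declares output ['out.c'], the result is
# ['gen.py', 'data.txt', 'out.c'].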
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
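# Illustrative sketch (hypothetical library, not upstream code): for a
# SharedLibrary with filename 'libfoo.so.0.100.0', ltversion '0.100.0' and
# soversion '0', this returns
#   {'libfoo.so.0': 'libfoo.so.0.100.0',   # soversion alias
#    'libfoo.so': 'libfoo.so.0'}           # unversioned alias, for linking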
| 1,788
| 1,816
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a unity build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
# extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
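# Illustrative sketch (hypothetical values, not upstream code): with
# full_env = {'PATH': '/usr/bin'} and a stored entry
# (self.append, 'PATH', ['/opt/bin'], {}), get_env() yields
# {'PATH': '/usr/bin' + os.pathsep + '/opt/bin'}; prepend() would put
# '/opt/bin' in front instead, and set() would replace the value outright.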
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
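# Illustrative sketch (hypothetical values, not upstream code):
# construct_id_from_path('src/lib', 'foo', '@sha') returns
# '<h>@@foo@sha', where <h> is the first 7 hex digits of
# sha256('src/lib'); with an empty subdir it is just 'foo@sha'.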
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
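# Illustrative sketch (hypothetical kwargs, not upstream code): with
# kwargs = {'override_options': ['c_std=c99', ' warning_level = 3 ']},
# parse_overrides() returns {'c_std': 'c99', 'warning_level': '3'};
# an entry without '=' raises InvalidArguments.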
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
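# Illustrative sketch (hypothetical mappings, not upstream code): if an
# earlier target already produced {'libA': 'x'} and a later one yields
# {'libA': 'y', 'libB': 'z'}, the merged result is
# {'libA': 'x', 'libB': 'z'}; the earlier mapping stays dominant.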
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg {} not one of: {}'.format(
    self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
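# Illustrative sketch (not upstream code): for a target with both C++ and
# C sources, 'cpp' precedes 'c' in clink_langs, so the C++ compiler is
# returned as the linker and stdlib_args collects C's
# language_stdlib_only_link_flags() (typically empty for plain C).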
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
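# Illustrative sketch (not part of Meson): how a priority-ordered language
# list, like the clink_langs used by get_clink_dynamic_linker_and_stdlibs()
# above, drives linker selection when a target mixes compiled languages.
# The ordering and helper below are hypothetical stand-ins, not Meson API.
_EXAMPLE_CLINK_PRIORITY = ['d', 'cpp', 'objcpp', 'objc', 'c']
def _example_pick_linker(target_langs, available_compilers):
    # Walk the priority list; the first language that is both used by the
    # target and has a configured compiler does the final link.
    for lang in _EXAMPLE_CLINK_PRIORITY:
        if lang in target_langs and lang in available_compilers:
            return available_compilers[lang]
    raise LookupError('no usable linker for {}'.format(sorted(target_langs)))
# _example_pick_linker({'c', 'cpp'}, {'c': 'cc', 'cpp': 'c++'}) returns 'c++':
# C++ outranks C because linking C++ objects needs the C++ stdlib.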
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
            raise InvalidArguments('Tried to get dep name for a rule that does not have a dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
    def is_parent_path(self, parent, trial):
        try:  # relative_to raises ValueError if trial is not under parent
            relpath = pathlib.PurePath(trial).relative_to(parent)
        except ValueError:
            return False
        return relpath.parts[0] != '..'  # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
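# Illustrative sketch (not part of Meson): the @BASENAME@/@PLAINNAME@
# substitution performed by Generator.get_base_outnames() above. The file
# and template names in the comments are made up for the example.
def _example_outnames(inname, output_templates):
    import os.path
    plainname = os.path.basename(inname)        # e.g. 'foo.idl'
    basename = os.path.splitext(plainname)[0]   # e.g. 'foo'
    return [t.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
            for t in output_templates]
# _example_outnames('src/foo.idl', ['@BASENAME@.c', '@BASENAME@.h'])
# returns ['foo.c', 'foo.h'].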
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
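# Illustrative sketch (not part of Meson): how preserve_path_from keeps a
# generated file's directory layout, as in add_preserved_path_segment()
# above. All paths here are hypothetical.
def _example_preserved_outputs(in_abs, preserve_root, outfiles):
    import os.path
    # The input's directory relative to the preserved root ...
    path_segment = os.path.dirname(os.path.relpath(in_abs, preserve_root))
    # ... is prepended to every output name, mirroring the source layout.
    return [os.path.join(path_segment, of) for of in outfiles]
# _example_preserved_outputs('/src/proto/net/a.proto', '/src/proto', ['a.pb.c'])
# returns ['net/a.pb.c'].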
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
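# Illustrative sketch (not part of Meson): the per-platform executable
# suffix policy from Executable.__init__ above, reduced to the common
# cases. The platform strings are hypothetical inputs, not Meson API.
def _example_exe_filename(name, platform, langs=()):
    if platform in ('windows', 'cygwin') or 'cs' in langs:
        return name + '.exe'
    return name  # Unix executables get no suffix by default.
# _example_exe_filename('frob', 'windows') -> 'frob.exe'
# _example_exe_filename('frob', 'linux')   -> 'frob'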
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
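# Illustrative sketch (not part of Meson): the static library naming rules
# from StaticLibrary.__init__ above; 'libfoo.a' everywhere, with the Rust
# rlib crate type as the exception. Inputs are hypothetical.
def _example_static_lib_filename(name, rust_crate_type=None):
    suffix = 'rlib' if rust_crate_type == 'rlib' else 'a'
    return 'lib{}.{}'.format(name, suffix)
# _example_static_lib_filename('foo')         -> 'libfoo.a'
# _example_static_lib_filename('foo', 'rlib') -> 'libfoo.rlib'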
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
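# Illustrative sketch (not part of Meson): the alias chain built by
# SharedLibrary.get_aliases() above for a versioned .so. The library name
# and version numbers are made up.
def _example_so_aliases(name, ltversion, soversion):
    real = 'lib{}.so.{}'.format(name, ltversion)  # the file on disk
    aliases = {}
    if ltversion != soversion:
        # libfoo.so.0 -> libfoo.so.0.100.0
        aliases['lib{}.so.{}'.format(name, soversion)] = real
        real = 'lib{}.so.{}'.format(name, soversion)
    # libfoo.so -> libfoo.so.0 (the unversioned name used at link time)
    aliases['lib{}.so'.format(name)] = real
    return aliases
# _example_so_aliases('foo', '0.100.0', '0') returns
# {'libfoo.so.0': 'libfoo.so.0.100.0', 'libfoo.so': 'libfoo.so.0'}.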
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
        Recursively fetch the build targets that this custom target depends on,
        whether through `command:`, `depends:`, or `sources:`. The recursion is
        only performed on custom targets.
        This is useful for setting PATH on Windows for finding required DLLs.
        For example, you might have a Python script that loads a C module
        that links to other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
            if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
                raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library); got: %s(%s)'
                                       % (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
            raise InvalidArguments('Tried to get depfile name for a custom_target that does not have a depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
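# Illustrative sketch (not part of Meson): the recursion performed by
# CustomTarget.get_transitive_build_target_deps() above. 'node' is a
# hypothetical object with .deps (list) and .is_build_target (bool); it
# stands in for the BuildTarget/CustomTarget distinction.
def _example_transitive_build_deps(node):
    found = set()
    for dep in node.deps:
        if dep.is_build_target:
            found.add(dep)  # build targets are collected, not recursed into
        else:
            found |= _example_transitive_build_deps(dep)  # custom target
    return found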
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
            return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
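# Illustrative sketch (not part of Meson's API): the pickle round trip that
# save() and load() above perform, using a throwaway dict and a temporary
# directory in place of a real Build object and build tree.
def _example_roundtrip():
    import os, pickle, tempfile
    with tempfile.TemporaryDirectory() as d:
        fname = os.path.join(d, 'build.dat')
        with open(fname, 'wb') as f:
            pickle.dump({'targets': []}, f)  # stands in for a Build
        with open(fname, 'rb') as f:
            return pickle.load(f)            # load() also type-checks this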
|
get_transitive_build_target_deps
|
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
For example, you might have a Python script that loads a C module
that links to other DLLs in your project.
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
        # Figure out if the extracted object list is compatible with a Unity
        # build. When we're doing a unity build, we go through the sources,
        # and create a single source file from each subset of the sources that
        # can be compiled with a specific compiler. Then we create one object
        # from each unified source file. So for each compiler we can either
        # extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
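# Illustrative sketch (not part of Meson): the "all sources or none per
# compiler" rule enforced by check_unity_compatible() above. The arguments
# are hypothetical {language: [sources]} classifications.
def _example_unity_ok(all_sources_by_lang, extracted_by_lang):
    for lang, extracted in extracted_by_lang.items():
        # Extracting a strict subset would split one unified object file.
        if set(extracted) != set(all_sources_by_lang.get(lang, [])):
            return False
    return True
# _example_unity_ok({'c': ['a.c', 'b.c']}, {'c': ['a.c']})        -> False
# _example_unity_ok({'c': ['a.c', 'b.c']}, {'c': ['a.c', 'b.c']}) -> True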
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
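# Illustrative sketch (not part of Meson): how append folds new values into
# an existing environment, mirroring EnvironmentVariables.append() above.
# The variable name and values are made up.
def _example_append(env, name, values, sep):
    value = sep.join(values)
    return env[name] + sep + value if name in env else value
# _example_append({'PATH': '/usr/bin'}, 'PATH', ['/opt/bin'], ':')
# returns '/usr/bin:/opt/bin'.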
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported; it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
            # Preserve my_id for better debuggability.
return subdir_part + '@@' + my_id
return my_id
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
def is_linkable_target(self):
return False
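# Illustrative sketch (not part of Meson): the ID scheme implemented by
# Target.construct_id_from_path() above, with the subdir hash shortened the
# same way. The inputs in the comment are hypothetical.
def _example_target_id(subdir, name, type_suffix):
    import hashlib
    name_part = name.replace('/', '@').replace('\\', '@') + type_suffix
    if not subdir:
        return name_part
    subdir_hash = hashlib.sha256(subdir.encode('utf-8')).hexdigest()[:7]
    return subdir_hash + '@@' + name_part
# _example_target_id('src/lib', 'foo', '@sha') -> '<7 hex chars>@@foo@sha'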
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
        # 3. Source files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
        if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding those as link dependencies would inherently cause them to be
generated twice: the output would need to be passed both to ld_args and
to link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or Custom Targets, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('Compiler arguments must be strings or File objects.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
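# Illustrative sketch, not part of the original file: a C executable that
# uses link_with on a pure C++ static library would roughly report
#     self.get_langs_used_by_deps() == ['cpp']
# telling the caller that the C++ runtime must be linked in.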
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
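# Illustrative sketch (hedged): with self.compilers == {'c': cc} and a C++
# dependency language, the priority order below selects the C++ compiler
# as the linker and collects stdlib-only link flags for the other
# languages involved:
#     linker, stdlib_args = target.get_clink_dynamic_linker_and_stdlibs()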
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
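# Illustrative sketch (hedged): downstream file naming keys off this check:
#     target.get_using_msvc()  # True for 'msvc'/'clang-cl'/'llvm'/'dmd'
# which e.g. selects foo.lib import libraries instead of libfoo.dll.a.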
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet, so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
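# Illustrative sketch (hedged): with self.outputs == ['@BASENAME@.h'] and
# an input of 'src/foo.vala', plainname is 'foo.vala' and basename 'foo':
#     self.get_base_outnames('src/foo.vala') == ['foo.h']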
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files, not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human-friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
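# Illustrative sketch (hedged): the normalization above maps
#     2          -> ['2', '2']
#     '1.2.3'    -> ['1.2.3', '1.2.3']
#     [1, '2.3'] -> ['1', '2.3']
# and rejects anything outside X[.Y[.Z]] or the documented numeric ranges.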
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be output by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
# MASKED: get_transitive_build_target_deps function (lines 1902-1918)
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library), got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
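# Usage sketch, assuming File(is_built, subdir, fname) construction from
# mesonlib: plain strings pass through unchanged, Files contribute their
# fname, and targets contribute every output.
def _string_names_sketch():
    srcs = ['main.c', File(False, 'src', 'util.c')]
    return get_sources_string_names(srcs)  # ['main.c', 'util.c']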
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
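# Round-trip sketch: save() pickles the Build object to the path that load()
# expects, and load() validates both the unpickling and the object's type.
def _persistence_sketch(build_obj, build_dir):
    save(build_obj, os.path.join(build_dir, 'meson-private', 'build.dat'))
    return load(build_dir)  # raises MesonException if the file is corrupted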
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
For example, if you have a Python script that loads a C module that links
to other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
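# Sketch of the recursion above on a hypothetical chain: if this custom
# target's command depends on another custom target, that target's own
# transitive BuildTarget deps are folded in, so every DLL directory can be
# collected when assembling PATH.
def _dll_dirs_sketch(custom_target):
    return {bt.get_subdir() for bt in custom_target.get_transitive_build_target_deps()}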
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
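# Sketch of the aliasing rule in Build.copy() above: container fields are
# shallow-copied, but the compiler tables keep pointing at coredata's
# dictionaries, so copies observe later compiler additions.
def _build_copy_sketch(build):
    other = build.copy()
    assert other.compilers is build.compilers  # alias preserved on purpose
    assert other.targets is not build.targets  # containers are copied
    return other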
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a Unified build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
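# Sketch of the all-or-nothing rule enforced above, with hypothetical source
# sets: in a unity build each compiler's sources are fused into one object,
# so a partial extraction can never match the compiler's full set.
def _unity_rule_sketch():
    cmpsrcs = {'c': {'a.c', 'b.c'}}
    extracted = {'c': {'a.c'}}
    return all(set(srcs) == cmpsrcs[comp] for comp, srcs in extracted.items())  # False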
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
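# Usage sketch, assuming envvars is populated directly (the public setters
# live in the interpreter): prepend puts new entries ahead of the inherited
# value, append behind it, joined with the separator (os.pathsep by default).
def _envvars_sketch():
    ev = EnvironmentVariables()
    ev.envvars.append((ev.prepend, 'PATH', ['/opt/bin'], {}))
    return ev.get_env({'PATH': '/usr/bin'})  # {'PATH': '/opt/bin' + os.pathsep + '/usr/bin'}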
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with a leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve my_id for better debuggability
return subdir_part + '@@' + my_id
return my_id
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
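# Worked example of the ID scheme above, assuming name 'mylib' in subdir
# 'sub/dir' with the '@sta' suffix used by static libraries: separators in
# the name become '@', and the subdir collapses to a 7-char sha256 prefix.
def _target_id_sketch():
    tid = Target.construct_id_from_path('sub/dir', 'mylib', '@sta')
    assert tid == Target._get_id_hash('sub/dir') + '@@' + 'mylib@sta'
    return tid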
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
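# Sketch of the accepted override syntax: each entry must be 'key=value',
# and only the first '=' splits, so values may themselves contain '='.
def _overrides_sketch(target):
    kwargs = {'override_options': ['cpp_std=c++14', 'flags=a=b']}
    return target.parse_overrides(kwargs)  # {'cpp_std': 'c++14', 'flags': 'a=b'}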
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link dependency would cause them to be generated twice:
the output needs to be passed to both ld_args and link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
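# Sketch of the accepted link_depends forms, assuming a version script
# 'foo.map' that exists next to this target's meson.build: strings become
# source-tree Files, Files pass through, and anything exposing
# get_outputs() contributes built files.
def _link_depends_sketch(target, env):
    target.process_link_depends('foo.map', env)
    target.process_link_depends(File(False, target.subdir, 'foo.map'), env)
    return target.link_depends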
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
#we are merging two dictionaries, while keeping the earlier one dominant
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
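# Sketch of the priority walk above: for a target mixing C and C++ sources
# (or depending on a C++ library), 'cpp' outranks 'c' in clink_langs, so the
# C++ compiler links and the other languages contribute stdlib-only flags.
def _linker_choice_sketch(target):
    linker, stdlib_args = target.get_clink_dynamic_linker_and_stdlibs()
    return linker.get_language(), stdlib_args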
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
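# Sketch of the output-name substitution performed by get_base_outnames,
# assuming an input 'src/lexer.l' and an output rule '@BASENAME@.c':
# @PLAINNAME@ keeps the extension, @BASENAME@ drops it, and the input is
# reduced to its basename first.
def _outname_sketch():
    plainname = os.path.basename('src/lexer.l')   # 'lexer.l'
    basename = os.path.splitext(plainname)[0]     # 'lexer'
    rule = '@BASENAME@.c'
    return rule.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)  # 'lexer.c'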
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
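    # Hedged illustration (not part of meson's API surface): how the
    # normalization above behaves for a few representative inputs:
    #   _validate_darwin_versions(7)          -> ['7', '7']
    #   _validate_darwin_versions('1.2.3')    -> ['1.2.3', '1.2.3']
    #   _validate_darwin_versions([1, '2.0']) -> ['1', '2.0']
    #   _validate_darwin_versions('1.2.3.4')  -> raises InvalidArguments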
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
                    raise InvalidArguments('Invalid shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
        The name of the import library that will be output by the compiler.
        Returns None if no import library is required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
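# Hedged sketch (illustration only, not meson API): how the filename template
# and aliases described in determine_filenames()/get_aliases() fit together
# for a Linux shared library with version '0.100.0'.
def _sketch_soname_aliases(name='foo', ltversion='0.100.0'):
    soversion = ltversion.split('.')[0]
    real = 'lib{}.so.{}'.format(name, ltversion)    # the actual file on disk
    soname = 'lib{}.so.{}'.format(name, soversion)  # runtime loader alias
    linkname = 'lib{}.so'.format(name)              # unversioned alias for linking
    # Mirrors the {alias: target} mapping that get_aliases() returns:
    return {soname: real, linkname: soname}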
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
        whether through `command:`, `depends:`, or `sources:`. The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
        For example, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
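    # Hedged illustration: if custom target A lists custom target B in
    # `depends:` and B's `command:` runs a built executable E, then
    # A.get_transitive_build_target_deps() returns {E}, so the backend can
    # put E's DLL directory on PATH before running A's command.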
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
            if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
            return ['-cp', cp_string]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
|
get
|
A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
When multiple attributes are specified, they are checked using
logical AND, not logical OR, meaning an element must match every
attribute passed in, not just one of them.
To search by a nested attribute (i.e. search by ``x.y``), pass
``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from functools import cached_property as cached_property
from typing_extensions import ParamSpec
from .permissions import Permissions
from .abc import Snowflake
from .invite import Invite
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
P = ParamSpec("P")
else:
cached_property = _cached_property
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
"""Read-only proxy of a Sequence."""
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overriden: T) -> T:
overriden.__doc__ = original.__doc__
overriden.__signature__ = _signature(original) # type: ignore
return overriden
return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return decorated
return actual_decorator
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += "&scope=" + "+".join(scopes or ("bot",))
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += "&response_type=code&" + urlencode({"redirect_uri": redirect_uri})
if disable_guild_select:
url += "&disable_guild_select=true"
return url
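# Hedged usage sketch (illustrative client ID, not part of the library):
def _demo_oauth_url():
    # With no extra arguments only the default 'bot' scope is requested:
    url = oauth_url(1234567890)
    # -> 'https://discord.com/oauth2/authorize?client_id=1234567890&scope=bot'
    return url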
def snowflake_time(id: int) -> datetime.datetime:
"""
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
"""
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
"""Returns a numeric snowflake pretending to be created at the given date.
When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
    When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
    to be inclusive, ``high=False`` to be exclusive.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
        Whether to set the lower 22 bits to all ones (high) or all zeros (low).
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2 ** 22 - 1 if high else 0)
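# Hedged sketch of the snowflake <-> time round trip and the inclusive range
# endpoints described in the docstring above (example datetimes only):
def _demo_snowflake_range():
    start = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
    end = datetime.datetime(2021, 2, 1, tzinfo=datetime.timezone.utc)
    # IDs created within [start, end], inclusive on both sides:
    lo = time_snowflake(start, high=False) - 1
    hi = time_snowflake(end, high=True) + 1
    # snowflake_time() recovers the creation time from the top bits:
    assert snowflake_time(time_snowflake(start)) == start
    return lo, hi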
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
"""A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This is different from :func:`py:filter` due to the fact it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
"""
for element in seq:
if predicate(element):
return element
return None
# MASKED: get function (lines 386-448)
def _unique(iterable: Iterable[T]) -> List[T]:
return [x for x in dict.fromkeys(iterable)]
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
if HAS_ORJSON:
def _to_json(obj: Any) -> str: # type: ignore
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads # type: ignore
else:
def _to_json(obj: Any) -> str:
return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)
_from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
        The timestamp until which to sleep. If the datetime is naive then
        it is assumed to be local time.
    result: Any
        If provided, this is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
"""A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
"""
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and 4096 >= size >= 16
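# The `size & (size - 1)` trick above clears the lowest set bit, so it is
# zero only for powers of two. Hedged illustration:
def _demo_valid_icon_size():
    assert valid_icon_size(256)        # power of two within [16, 4096]
    assert not valid_icon_size(100)    # not a power of two
    assert not valid_icon_size(8192)   # power of two, but above 4096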
class SnowflakeList(array.array):
"""Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
"""
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data)) # type: ignore
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
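# Hedged usage sketch: SnowflakeList keeps its unsigned 64-bit IDs sorted,
# so has()/get() are binary searches and add() is a sorted insert.
def _demo_snowflake_list():
    ids = SnowflakeList([3, 1, 2])  # unsorted input is sorted up front: 1, 2, 3
    assert ids.has(2)
    ids.add(5)                      # inserted in order via bisect_left
    assert ids.get(4) is None
    return list(ids)                # -> [1, 2, 3, 5]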
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
def resolve_invite(invite: Union[Invite, str]) -> str:
"""
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
"""
from .invite import Invite # circular import
if isinstance(invite, Invite):
return invite.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
def resolve_template(code: Union[Template, str]) -> str:
"""
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
"""
from .template import Template # circular import
if isinstance(code, Template):
return code.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
"""A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
"""
def replacement(match):
groupdict = match.groupdict()
return groupdict.get("url", "")
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
r"""A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
"""
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return "\\" + groupdict["markdown"]
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
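# Hedged illustration of the two escaping modes documented above:
def _demo_escape_markdown():
    assert escape_markdown('**hello**') == '\\*\\*hello\\*\\*'
    # as_needed only escapes what is necessary (see the docstring caveat):
    assert escape_markdown('**hello**', as_needed=True) == '\\*\\*hello**'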
def escape_mentions(text: str) -> str:
"""A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
"""
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
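# Minimal usage sketch for the synchronous path:
def _demo_as_chunks():
    chunks = list(as_chunks(iter(range(7)), 3))
    # The last chunk may be smaller than max_size:
    assert chunks == [[0, 1, 2], [3, 4, 5], [6]]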
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
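# Hedged sketch: string annotations become ForwardRefs and are eval'd in the
# given namespaces; typing generics are rebuilt from their evaluated args.
def _demo_resolve_annotation():
    cache: Dict[str, Any] = {}
    resolved = resolve_annotation('Optional[int]', globals(), None, cache)
    assert resolved == Optional[int]
    assert 'Optional[int]' in cache  # later lookups are served from the cache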
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
"""A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
"""
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
|
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
r"""A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
    When multiple attributes are specified, they are checked using
    logical AND, not logical OR, meaning an element must match every
    attribute passed in, not just one of them.
    To search by a nested attribute (i.e. search by ``x.y``), pass
    ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
"""
# global -> local
_all = all
attrget = attrgetter
# Special case the single element call
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
| 386
| 448
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from functools import cached_property as cached_property
from typing_extensions import ParamSpec
from .permissions import Permissions
from .abc import Snowflake
from .invite import Invite
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
P = ParamSpec("P")
else:
cached_property = _cached_property
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
"""Read-only proxy of a Sequence."""
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overriden: T) -> T:
overriden.__doc__ = original.__doc__
overriden.__signature__ = _signature(original) # type: ignore
return overriden
return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return decorated
return actual_decorator
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += "&scope=" + "+".join(scopes or ("bot",))
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += "&response_type=code&" + urlencode({"redirect_uri": redirect_uri})
if disable_guild_select:
url += "&disable_guild_select=true"
return url
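# Illustrative sketch (hypothetical values, not part of this module): a client
# ID plus explicit scopes; had scopes been omitted, the URL would fall back to
# the default ('bot',) scope with no permissions parameter.
def _example_oauth_url() -> None:
    url = oauth_url(1234567890, scopes=('bot', 'applications.commands'))
    assert url.startswith('https://discord.com/oauth2/authorize?client_id=1234567890')
    assert 'scope=bot+applications.commands' in url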
def snowflake_time(id: int) -> datetime.datetime:
"""
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
"""
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
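# Illustrative sketch (hypothetical helper, not part of this module): the top
# 42 bits of a snowflake hold milliseconds since DISCORD_EPOCH (2015-01-01
# UTC). The ID below is the example snowflake from Discord's API docs.
def _example_snowflake_time() -> None:
    created = snowflake_time(175928847299117063)
    assert created.year == 2016 and created.tzinfo is datetime.timezone.utc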
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
"""Returns a numeric snowflake pretending to be created at the given date.
When used as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When used as the higher end of a range, use ``time_snowflake(high=True) + 1``
to be inclusive, ``high=False`` to be exclusive.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
Whether or not to set the lower 22 bits high or low.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2 ** 22 - 1 if high else 0)
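# Illustrative sketch (hypothetical helper, not part of this module) of the
# inclusive-range recipe from the docstring above: snowflake bounds covering
# everything created during 2020 (UTC).
def _example_time_snowflake() -> None:
    start = datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc)
    end = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
    lo = time_snowflake(start, high=False) - 1  # inclusive lower bound
    hi = time_snowflake(end, high=True) + 1     # inclusive upper bound
    assert lo < hi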
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
"""A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This differs from :func:`py:filter` in that it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
"""
for element in seq:
if predicate(element):
return element
return None
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
r"""A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
When multiple attributes are specified, they are checked using
logical AND, not logical OR: the element has to match every
attribute passed in, not just one of them.
To do a nested attribute search (i.e. search by ``x.y``),
pass in ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
"""
# global -> local
_all = all
attrget = attrgetter
# Special case the single element call
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
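# Illustrative sketch (hypothetical helper, not part of this module) of the
# ``x__y`` nesting rule, using SimpleNamespace stand-ins instead of real
# Discord models.
def _example_get_nested() -> None:
    guild = types.SimpleNamespace(name='Cool')
    channel = types.SimpleNamespace(name='general', guild=guild)
    assert get([channel], guild__name='Cool', name='general') is channel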
def _unique(iterable: Iterable[T]) -> List[T]:
return list(dict.fromkeys(iterable))
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
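# Illustrative sketch (hypothetical helper, not part of this module): detection
# is by magic bytes only, so an 8-byte PNG signature plus padding is enough to
# be classified as image/png.
def _example_mime_sniff() -> None:
    png = b"\x89PNG\r\n\x1a\n" + b"\x00" * 8
    assert _get_mime_type_for_image(png) == "image/png"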
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
if HAS_ORJSON:
def _to_json(obj: Any) -> str: # type: ignore
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads # type: ignore
else:
def _to_json(obj: Any) -> str:
return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)
_from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
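# Illustrative sketch (hypothetical request stand-in, not part of this module):
# when X-Ratelimit-Reset-After is present and use_clock is False, the header
# value is trusted directly instead of being derived from the absolute reset.
def _example_ratelimit_header() -> None:
    request = types.SimpleNamespace(headers={"X-Ratelimit-Reset-After": "2.5"})
    assert _parse_ratelimit_header(request) == 2.5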
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
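# Illustrative sketch (hypothetical helpers, not part of this module):
# async_all accepts a mix of plain values and awaitables, awaiting only the
# latter before testing truthiness.
def _example_async_all() -> None:
    async def _truthy() -> bool:
        return True

    async def _demo() -> bool:
        return await async_all([True, _truthy()])

    assert asyncio.run(_demo()) is True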
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
The timestamp to sleep until. If the datetime is naive then
it is assumed to be local time.
result: Any
If provided, it is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
"""A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
"""
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and 4096 >= size >= 16
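# Note: ``size & (size - 1)`` clears the lowest set bit, so it is zero exactly
# for powers of two; the chained comparison then restricts the range. An
# illustrative sketch (hypothetical helper, not part of this module):
def _example_valid_icon_size() -> None:
    assert valid_icon_size(256)
    assert not valid_icon_size(100)   # not a power of two
    assert not valid_icon_size(8192)  # power of two, but out of range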
class SnowflakeList(array.array):
"""Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
"""
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data)) # type: ignore
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
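# Illustrative sketch (hypothetical helper, not part of this module): the list
# sorts on construction, inserts at the bisected index, and answers membership
# queries via binary search.
def _example_snowflake_list() -> None:
    ids = SnowflakeList([3, 1, 2])
    ids.add(4)
    assert ids.has(2) and ids.get(5) is None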
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
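# Illustrative sketch (hypothetical helper, not part of this module): pure
# ASCII takes the regex fast path, while East Asian wide/fullwidth/ambiguous
# characters each count as two columns.
def _example_string_width() -> None:
    assert _string_width("abc") == 3
    assert _string_width("\u3042\u3044") == 4  # two wide hiragana characters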
def resolve_invite(invite: Union[Invite, str]) -> str:
"""
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
"""
from .invite import Invite # circular import
if isinstance(invite, Invite):
return invite.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
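# Illustrative sketch (hypothetical helper, not part of this module): full
# invite URLs are reduced to their code, while bare codes (or unrecognised
# strings) pass through unchanged.
def _example_resolve_invite() -> None:
    assert resolve_invite("https://discord.gg/abc123") == "abc123"
    assert resolve_invite("abc123") == "abc123"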
def resolve_template(code: Union[Template, str]) -> str:
"""
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
"""
from .template import Template # circular import
if isinstance(code, Template):
return code.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
"""A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
"""
def replacement(match):
groupdict = match.groupdict()
# dict.get only falls back when the key is absent; with ignore_links the
# "url" group is present but None on plain markdown matches, so coalesce
# it to keep re.sub supplied with strings.
return groupdict.get("url") or ""
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
r"""A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
"""
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return "\\" + groupdict["markdown"]
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
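# Illustrative sketch (hypothetical helper, not part of this module) of the
# default ``as_needed=False`` path: every markdown metacharacter outside a link
# is backslash-escaped.
def _example_escape_markdown() -> None:
    assert escape_markdown("**hello**") == "\\*\\*hello\\*\\*"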
def escape_mentions(text: str) -> str:
"""A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
"""
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
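# Illustrative sketch (hypothetical helper, not part of this module) of the
# synchronous path: the final chunk may be shorter than ``max_size``, exactly
# as the warning above notes.
def _example_as_chunks() -> None:
    assert list(as_chunks(iter(range(5)), 2)) == [[0, 1], [2, 3], [4]]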
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
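# Illustrative sketch (hypothetical helper, not part of this module): a string
# annotation is wrapped in a ForwardRef and evaluated against the given
# namespaces, landing on the real typing object.
def _example_resolve_annotation() -> None:
    assert resolve_annotation("Optional[int]", globals(), None, None) == Optional[int]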
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
"""A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
"""
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
|
format_dt
|
A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from functools import cached_property as cached_property
from typing_extensions import ParamSpec
from .permissions import Permissions
from .abc import Snowflake
from .invite import Invite
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
P = ParamSpec("P")
else:
cached_property = _cached_property
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
"""Read-only proxy of a Sequence."""
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overriden: T) -> T:
overriden.__doc__ = original.__doc__
overriden.__signature__ = _signature(original) # type: ignore
return overriden
return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return decorated
return actual_decorator
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += "&scope=" + "+".join(scopes or ("bot",))
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += "&response_type=code&" + urlencode({"redirect_uri": redirect_uri})
if disable_guild_select:
url += "&disable_guild_select=true"
return url
def snowflake_time(id: int) -> datetime.datetime:
"""
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
"""
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
"""Returns a numeric snowflake pretending to be created at the given date.
When used as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When used as the higher end of a range, use ``time_snowflake(high=True) + 1``
to be inclusive, ``high=False`` to be exclusive.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
Whether or not to set the lower 22 bits high or low.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2 ** 22 - 1 if high else 0)
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
"""A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This differs from :func:`py:filter` in that it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
"""
for element in seq:
if predicate(element):
return element
return None
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
r"""A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
When multiple attributes are specified, they are checked using
logical AND, not logical OR: the element has to match every
attribute passed in, not just one of them.
To do a nested attribute search (i.e. search by ``x.y``),
pass in ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
"""
# global -> local
_all = all
attrget = attrgetter
# Special case the single element call
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
def _unique(iterable: Iterable[T]) -> List[T]:
return list(dict.fromkeys(iterable))
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
if HAS_ORJSON:
def _to_json(obj: Any) -> str: # type: ignore
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads # type: ignore
else:
def _to_json(obj: Any) -> str:
return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)
_from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
The timestamp to sleep until. If the datetime is naive then
it is assumed to be local time.
result: Any
If provided, it is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
"""A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
"""
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and 4096 >= size >= 16
class SnowflakeList(array.array):
"""Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
"""
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data)) # type: ignore
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
def resolve_invite(invite: Union[Invite, str]) -> str:
"""
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
"""
from .invite import Invite # circular import
if isinstance(invite, Invite):
return invite.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
def resolve_template(code: Union[Template, str]) -> str:
"""
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
"""
from .template import Template # circular import
if isinstance(code, Template):
return code.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
"""A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
"""
def replacement(match):
groupdict = match.groupdict()
# dict.get only falls back when the key is absent; with ignore_links the
# "url" group is present but None on plain markdown matches, so coalesce
# it to keep re.sub supplied with strings.
return groupdict.get("url") or ""
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
r"""A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
"""
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return "\\" + groupdict["markdown"]
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
def escape_mentions(text: str) -> str:
"""A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
"""
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
# MASKED: format_dt function (lines 980-1022)
|
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
"""A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
"""
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
| 980
| 1022
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from functools import cached_property as cached_property
from typing_extensions import ParamSpec
from .permissions import Permissions
from .abc import Snowflake
from .invite import Invite
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
P = ParamSpec("P")
else:
cached_property = _cached_property
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
"""Read-only proxy of a Sequence."""
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overriden: T) -> T:
overriden.__doc__ = original.__doc__
overriden.__signature__ = _signature(original) # type: ignore
return overriden
return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return decorated
return actual_decorator
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += "&scope=" + "+".join(scopes or ("bot",))
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += "&response_type=code&" + urlencode({"redirect_uri": redirect_uri})
if disable_guild_select:
url += "&disable_guild_select=true"
return url
def snowflake_time(id: int) -> datetime.datetime:
"""
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
"""
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
"""Returns a numeric snowflake pretending to be created at the given date.
When used as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When used as the higher end of a range, use ``time_snowflake(high=True) + 1``
to be inclusive, ``high=False`` to be exclusive.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
Whether or not to set the lower 22 bits high or low.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2 ** 22 - 1 if high else 0)
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
"""A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This differs from :func:`py:filter` in that it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
"""
for element in seq:
if predicate(element):
return element
return None
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
r"""A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
When multiple attributes are specified, they are checked using
logical AND, not logical OR: the element has to match every
attribute passed in, not just one of them.
To do a nested attribute search (i.e. search by ``x.y``),
pass in ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
"""
# global -> local
_all = all
attrget = attrgetter
# Special case the single element call
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
def _unique(iterable: Iterable[T]) -> List[T]:
return list(dict.fromkeys(iterable))
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
if HAS_ORJSON:
def _to_json(obj: Any) -> str: # type: ignore
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads # type: ignore
else:
def _to_json(obj: Any) -> str:
return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)
_from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
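async def _demo_maybe_coroutine() -> None:
    # Hedged sketch (not part of the library): plain callables and coroutine
    # functions can be driven through the same awaiting code path.
    assert await maybe_coroutine(lambda: 1) == 1

    async def _two() -> int:
        return 2

    assert await maybe_coroutine(_two) == 2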
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
        The timestamp to sleep until. If the datetime is naive, it is
        assumed to be local time.
result: Any
        If provided, it is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
"""A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
"""
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and 4096 >= size >= 16
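def _demo_valid_icon_size() -> None:
    # Hedged sketch (not part of the library): ``size & (size - 1)`` is zero
    # exactly when ``size`` is a power of two, so only powers of two inside
    # the inclusive range [16, 4096] pass.
    assert valid_icon_size(16) and valid_icon_size(1024) and valid_icon_size(4096)
    assert not valid_icon_size(8)    # a power of two, but below 16
    assert not valid_icon_size(100)  # in range, but not a power of two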
class SnowflakeList(array.array):
"""Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
"""
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data)) # type: ignore
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
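def _demo_snowflake_list() -> None:
    # Hedged sketch (not part of the library): the array is kept sorted, so
    # ``bisect_left`` gives O(log n) lookups and ordered O(n) inserts.
    ids = SnowflakeList([3, 1, 2])
    assert list(ids) == [1, 2, 3]
    ids.add(4)
    assert ids.has(4)
    assert ids.get(5) is None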
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
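def _demo_string_width() -> None:
    # Hedged sketch (not part of the library): pure-ASCII strings take the
    # regex fast path, while East Asian wide characters count as two columns.
    assert _string_width("hello") == 5
    assert _string_width("日本") == 4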
def resolve_invite(invite: Union[Invite, str]) -> str:
"""
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
"""
from .invite import Invite # circular import
if isinstance(invite, Invite):
return invite.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
def resolve_template(code: Union[Template, str]) -> str:
"""
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
"""
from .template import Template # circular import
if isinstance(code, Template):
return code.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
"""A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
"""
    def replacement(match):
        groupdict = match.groupdict()
        # Named groups that did not participate in the match show up as
        # ``None`` in groupdict(), so fall back to an empty string explicitly.
        url = groupdict.get("url")
        return url if url is not None else ""
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
r"""A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
"""
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return "\\" + groupdict["markdown"]
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
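def _demo_markdown_helpers() -> None:
    # Hedged sketch (not part of the library): ``remove_markdown`` strips the
    # stock markdown characters, while ``escape_markdown`` backslash-escapes
    # them so they render literally in Discord.
    assert remove_markdown("**hello** _world_") == "hello world"
    assert escape_markdown("*hi*") == "\\*hi\\*"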
def escape_mentions(text: str) -> str:
"""A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
"""
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
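def _demo_as_chunks() -> None:
    # Hedged sketch (not part of the library): a sync iterator is collected
    # into lists of at most ``max_size`` items; the final chunk may be short.
    assert list(as_chunks(iter(range(7)), 3)) == [[0, 1, 2], [3, 4, 5], [6]]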
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
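def _demo_resolve_annotation() -> None:
    # Hedged sketch (not part of the library): a string annotation is wrapped
    # in a ForwardRef and evaluated against the supplied namespaces.
    assert resolve_annotation("Optional[int]", globals(), None, None) == Optional[int]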
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
"""A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
"""
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
|
__init__
|
:param rule_file_path: Path to the file containing rule definition.
:type rule_file_path: ``str``
:param trigger_instance_file_path: Path to the file containing trigger instance definition.
:type trigger_instance_file_path: ``str``
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import mock
from jinja2.exceptions import UndefinedError
from st2common import log as logging
from st2common.content.loader import MetaLoader
from st2common.models.db.rule import RuleDB
from st2common.models.db.trigger import TriggerDB
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.reactor import Rule, TriggerInstance, Trigger
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
__all__ = [
'RuleTester'
]
LOG = logging.getLogger(__name__)
class RuleTester(object):
# MASKED: __init__ function (lines 43-56)
def evaluate(self):
"""
Evaluate trigger instance against the rule.
:return: ``True`` if the rule matches, ``False`` otherwise.
:rtype: ``boolean``
"""
rule_db = self._get_rule_db()
trigger_instance_db, trigger_db = self._get_trigger_instance_db()
# The trigger check needs to be performed here as that is not performed
# by RulesMatcher.
if rule_db.trigger != trigger_db.ref:
LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
rule_db.trigger, trigger_db.ref)
return False
# Check if rule matches criteria.
matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
rules=[rule_db], extra_info=True)
matching_rules = matcher.get_matching_rules()
# Rule does not match so early exit.
if len(matching_rules) < 1:
return False
# Check if rule can be enforced
enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)
runner_type_db = mock.Mock()
runner_type_db.runner_parameters = {}
action_db = mock.Mock()
action_db.parameters = {}
params = rule_db.action.parameters # pylint: disable=no-member
context, additional_contexts = enforcer.get_action_execution_context(action_db=action_db,
trace_context=None)
# Note: We only return partially resolved parameters.
# To be able to return all parameters we would need access to corresponding ActionDB,
# RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the
# tool is meant to be used standalone.
try:
params = enforcer.get_resolved_parameters(action_db=action_db,
runnertype_db=runner_type_db,
params=params,
context=context,
additional_contexts=additional_contexts)
LOG.info('Action parameters resolved to:')
for param in six.iteritems(params):
LOG.info('\t%s: %s', param[0], param[1])
return True
except (UndefinedError, ValueError) as e:
LOG.error('Failed to resolve parameters\n\tOriginal error : %s', six.text_type(e))
return False
        except Exception:
LOG.exception('Failed to resolve parameters.')
return False
def _get_rule_db(self):
if self._rule_file_path:
return self._get_rule_db_from_file(
file_path=os.path.realpath(self._rule_file_path))
elif self._rule_ref:
return Rule.get_by_ref(self._rule_ref)
raise ValueError('One of _rule_file_path or _rule_ref should be specified.')
def _get_trigger_instance_db(self):
if self._trigger_instance_file_path:
return self._get_trigger_instance_db_from_file(
file_path=os.path.realpath(self._trigger_instance_file_path))
elif self._trigger_instance_id:
trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')
def _get_rule_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
pack = data.get('pack', 'unknown')
name = data.get('name', 'unknown')
trigger = data['trigger']['type']
criteria = data.get('criteria', None)
action = data.get('action', {})
rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
enabled=True)
rule_db.id = 'rule_tester_rule'
return rule_db
def _get_trigger_instance_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
instance = TriggerInstanceDB(**data)
instance.id = 'rule_tester_instance'
trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
return instance, trigger_db
|
def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
trigger_instance_id=None):
"""
:param rule_file_path: Path to the file containing rule definition.
:type rule_file_path: ``str``
        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
:type trigger_instance_file_path: ``str``
"""
self._rule_file_path = rule_file_path
self._rule_ref = rule_ref
self._trigger_instance_file_path = trigger_instance_file_path
self._trigger_instance_id = trigger_instance_id
self._meta_loader = MetaLoader()
| 43
| 56
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import mock
from jinja2.exceptions import UndefinedError
from st2common import log as logging
from st2common.content.loader import MetaLoader
from st2common.models.db.rule import RuleDB
from st2common.models.db.trigger import TriggerDB
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.reactor import Rule, TriggerInstance, Trigger
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
__all__ = [
'RuleTester'
]
LOG = logging.getLogger(__name__)
class RuleTester(object):
def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
trigger_instance_id=None):
"""
:param rule_file_path: Path to the file containing rule definition.
:type rule_file_path: ``str``
        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
:type trigger_instance_file_path: ``str``
"""
self._rule_file_path = rule_file_path
self._rule_ref = rule_ref
self._trigger_instance_file_path = trigger_instance_file_path
self._trigger_instance_id = trigger_instance_id
self._meta_loader = MetaLoader()
def evaluate(self):
"""
Evaluate trigger instance against the rule.
:return: ``True`` if the rule matches, ``False`` otherwise.
:rtype: ``boolean``
"""
rule_db = self._get_rule_db()
trigger_instance_db, trigger_db = self._get_trigger_instance_db()
# The trigger check needs to be performed here as that is not performed
# by RulesMatcher.
if rule_db.trigger != trigger_db.ref:
LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
rule_db.trigger, trigger_db.ref)
return False
# Check if rule matches criteria.
matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
rules=[rule_db], extra_info=True)
matching_rules = matcher.get_matching_rules()
# Rule does not match so early exit.
if len(matching_rules) < 1:
return False
# Check if rule can be enforced
enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)
runner_type_db = mock.Mock()
runner_type_db.runner_parameters = {}
action_db = mock.Mock()
action_db.parameters = {}
params = rule_db.action.parameters # pylint: disable=no-member
context, additional_contexts = enforcer.get_action_execution_context(action_db=action_db,
trace_context=None)
# Note: We only return partially resolved parameters.
# To be able to return all parameters we would need access to corresponding ActionDB,
# RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the
# tool is meant to be used standalone.
try:
params = enforcer.get_resolved_parameters(action_db=action_db,
runnertype_db=runner_type_db,
params=params,
context=context,
additional_contexts=additional_contexts)
LOG.info('Action parameters resolved to:')
for param in six.iteritems(params):
LOG.info('\t%s: %s', param[0], param[1])
return True
except (UndefinedError, ValueError) as e:
LOG.error('Failed to resolve parameters\n\tOriginal error : %s', six.text_type(e))
return False
        except Exception:
LOG.exception('Failed to resolve parameters.')
return False
def _get_rule_db(self):
if self._rule_file_path:
return self._get_rule_db_from_file(
file_path=os.path.realpath(self._rule_file_path))
elif self._rule_ref:
return Rule.get_by_ref(self._rule_ref)
raise ValueError('One of _rule_file_path or _rule_ref should be specified.')
def _get_trigger_instance_db(self):
if self._trigger_instance_file_path:
return self._get_trigger_instance_db_from_file(
file_path=os.path.realpath(self._trigger_instance_file_path))
elif self._trigger_instance_id:
trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
return trigger_instance_db, trigger_db
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')
def _get_rule_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
pack = data.get('pack', 'unknown')
name = data.get('name', 'unknown')
trigger = data['trigger']['type']
criteria = data.get('criteria', None)
action = data.get('action', {})
rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
enabled=True)
rule_db.id = 'rule_tester_rule'
return rule_db
def _get_trigger_instance_db_from_file(self, file_path):
data = self._meta_loader.load(file_path=file_path)
instance = TriggerInstanceDB(**data)
instance.id = 'rule_tester_instance'
trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
return instance, trigger_db
|
fetch_currencies
|
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                '401': ExchangeError, # Deposits for this asset are not available at this time.
                '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
                '403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                '410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding self will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
        # Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
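    # Hedged note (not part of the generated source): the helper above only
    # re-fetches the public assets endpoint once the cached response is older
    # than options['fetchCurrencies']['expires'] (1000 ms by default); inside
    # that window the previously stored response is returned as-is.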
# MASKED: fetch_currencies function (lines 413-470)
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
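    # Hedged worked example (not part of the generated source): with
    # timeframe='1m' (60 seconds per candle), a given ``since`` and the
    # default limit of 1440, the request window becomes
    # end = since + 1440 * 60 * 1000 = since + 86,400,000 ms, i.e. 24 hours.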
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
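    # Hedged usage sketch (assumes valid API credentials; not part of the
    # generated source):
    #
    #     import asyncio
    #     import ccxt.async_support as ccxt
    #
    #     async def main():
    #         exchange = ccxt.bitvavo({'apiKey': '...', 'secret': '...'})
    #         # limit buy: price is required and amount is in the base currency
    #         order = await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.001, 20000)
    #         # stop-loss limit order: the trigger price goes through params
    #         stop = await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell',
    #                                            0.001, 19000, {'stopPrice': 19500})
    #         await exchange.close()
    #
    #     asyncio.run(main())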
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
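#
# illustrative only: editOrder() accepts a new price, a new amount, or the
# exchange-specific 'amountRemaining' override through params(the order id
# below is taken from the sample payloads in this file, the values are made up):
#
#     await exchange.edit_order('af76d6ce-9f7c-4006-b715-bb5d430652d0', 'ETH/EUR', 'limit', 'sell', None, 190.0)
#     await exchange.edit_order('af76d6ce-9f7c-4006-b715-bb5d430652d0', 'ETH/EUR', 'limit', 'sell', None, None, {'amountRemaining': 0.1})
#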
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
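#
# note the third argument to safe_string() above: a status missing from the
# map passes through unchanged instead of raising, e.g.(illustrative):
#
#     parse_order_status('filled')         # 'closed'
#     parse_order_status('someNewStatus')  # 'someNewStatus', unmapped, returned as-is
#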
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
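#
# illustrative pagination sketch(hypothetical timestamps): each page is capped
# at 1000 trades, so older history is walked forward with 'start'/'limit':
#
#     trades = await exchange.fetch_my_trades('ETH/EUR', since=1590505649245, limit=1000)
#     if len(trades):
#         next_since = trades[-1]['timestamp'] + 1  # start of the next page
#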
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
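#
# illustrative only(hypothetical address): the optional flags shown in the
# request template above are passed through params:
#
#     await exchange.withdraw('BTC', 0.1, 'bc1qexampleaddressxxxxxxxxxxxxxxxxxxxxxxx', None,
#         {'addWithdrawalFee': True})  # fee added on top instead of deducted from the amount
#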
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
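#
# illustrative only: both history endpoints accept the same windowing fields,
# so a one-day window of ETH deposits could look like(hypothetical times):
#
#     start = exchange.parse8601('2020-05-26T00:00:00Z')
#     deposits = await exchange.fetch_deposits('ETH', since=start, limit=1000, params={'end': start + 86400000})
#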
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
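#
# for reference, the signature above can be reproduced with only the standard
# library; a minimal sketch with hypothetical credentials, not exchange code:
#
#     import hashlib
#     import hmac
#     import time
#     timestamp = str(int(time.time() * 1000))
#     message = timestamp + 'GET' + '/v2/account' + ''  # timestamp + method + url + body
#     signature = hmac.new(b'YOUR_SECRET', message.encode(), hashlib.sha256).hexdigest()
#     headers = {
#         'BITVAVO-ACCESS-KEY': 'YOUR_KEY',
#         'BITVAVO-ACCESS-SIGNATURE': signature,
#         'BITVAVO-ACCESS-TIMESTAMP': timestamp,
#     }
#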
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
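#
# worked example of the cost lookup above, using the weights declared in
# describe(): privateGetOrdersOpen is configured as {'cost': 1, 'noMarket': 25},
# so fetching open orders without a market consumes 25 weighted units instead
# of 1; with rateLimit = 60 ms per unit, ccxt's built-in throttler spaces such
# calls roughly 25 * 60 = 1500 ms apart, versus 60 ms when a market is given
#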
|
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
| 413
| 470
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100,000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])  # rounding mode 0 is TRUNCATE
def amount_to_precision(self, symbol, amount):
# the amount field allows a fixed number of decimal places per market
# anything exceeding that is truncated to the market's amount precision
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# the precision level of all trading prices is counted in significant digits
# (see 'precisionMode' in describe), e.g. 1.2345, 123.45, 1234.5, 0.00012345
# anything beyond 8 decimal places is then truncated off the rounded price
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
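#
# a standalone sketch of the two-step pipeline above(values are illustrative;
# decimal_to_precision is ccxt's own helper):
#
#     from ccxt.base.decimal_to_precision import decimal_to_precision
#     from ccxt.base.decimal_to_precision import ROUND, TRUNCATE, DECIMAL_PLACES, SIGNIFICANT_DIGITS
#     p = decimal_to_precision('8097.456', ROUND, 5, SIGNIFICANT_DIGITS)  # '8097.5'
#     decimal_to_precision(p, TRUNCATE, 8, DECIMAL_PLACES)                # '8097.5'
#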
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
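#
# the cache window above is configurable through options; for example
# (hypothetical value), to re-fetch the asset list at most once per minute:
#
#     exchange.options['fetchCurrencies']['expires'] = 60000
#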
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
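#
# illustrative read of the unified result, using the ADA sample payload above:
#
#     currencies = await exchange.fetch_currencies()
#     currencies['ADA']['precision']                  # 6
#     currencies['ADA']['limits']['withdraw']['min']  # 0.2
#     currencies['ADA']['active']                     # True while deposits and withdrawals are both 'OK'
#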
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical summary calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical summaries calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbol
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested symbol
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
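#
# illustrative only: 'depth' caps both sides of the book, so the top five
# levels of BTC-EUR could be requested as:
#
#     book = await exchange.fetch_order_book('BTC/EUR', limit=5)
#     best_bid, best_ask = book['bids'][0][0], book['asks'][0][0]
#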
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
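#
# worked example of the 'end' computation above(hypothetical 'since'): for
# timeframe '1m', parse_timeframe() yields 60 seconds, and the default limit
# of 1440 candles gives a one-day window:
#
#     end = since + 1440 * 60 * 1000  # since + 86,400,000 ms = since + 24h
#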
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional memo or payment id for the withdrawal, sent to the exchange as paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
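        # the 'type' inference above relies on response shape: the withdraw()
        # response contains 'success' and the fetchWithdrawals rows contain
        # 'address'(see the samples above), while the fetchDeposits rows
        # carry neither key, so they fall through to 'deposit'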
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
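            # hypothetical worked example: timestamp '1590505649241' and a GET
            # request to '/v2/account' with no body give
            # auth = '1590505649241' + 'GET' + '/v2/account' + ''
            # and the HMAC-SHA256 hex digest of that string is the 64-character
            # signature whose length error 308 below complains about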
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
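        # matching order: the free-form 'error' text is checked against the
        # 'broad' map first, then 'errorCode' against the 'exact' map, so an
        # errorCode 205 response reading "symbol parameter is invalid" becomes
        # BadSymbol via the broad match before 205 could map to BadRequest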
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
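        # e.g. privateGetOrdersOpen is declared as {'cost': 1, 'noMarket': 25},
        # so calling it without a 'market' in params costs 25 weight units,
        # while a market-specific call costs only 1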
|
fetch_ticker
|
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100,000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
                    '411': BadRequest, # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
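    # rate limit arithmetic: 'rateLimit' is 60ms per weight unit, i.e. about
    # 1000 weighted requests per minute; an endpoint with cost 25, such as
    # ticker/24h without a market, therefore consumes 25 * 60ms = 1.5s of
    # that budget per call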
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
        # truncates the amount to the market's amount precision, which is a
        # number of decimal places derived from the base currency's 'decimals'
        # field(see fetch_markets below)
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # the precision of trading prices is based on significant figures: the
        # price is first rounded to the market's pricePrecision significant
        # digits(self.precisionMode is SIGNIFICANT_DIGITS), then truncated to
        # at most 8 decimal places, as more detailed prices are cut by the API
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
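    # illustrative example with hypothetical values: for a market with
    # pricePrecision 5, a price of 8095.34567 is first rounded to 5
    # significant digits('8095.3') and then truncated to at most 8 decimals,
    # so '8095.3' is what gets submitted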
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
# MASKED: fetch_ticker function (lines 472-501)
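    # A plausible sketch of the masked method, inferred from the fetch_ticker
    # docstring above and from parse_ticker/fetch_tickers below; the original
    # body may differ in detail:
    # async def fetch_ticker(self, symbol, params={}):
    #     await self.load_markets()
    #     market = self.market(symbol)
    #     request = {
    #         'market': market['id'],
    #     }
    #     response = await self.publicGetTicker24h(self.extend(request, params))
    #     return self.parse_ticker(response, market)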
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
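            # hypothetical example: timeframe '1h'(duration 3600s) with
            # limit 24 gives end = since + 24 * 3600 * 1000, i.e. one day
            # after 'start'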
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
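    # e.g. the sample row below, {"symbol": "BTC", "available": "1.57593193",
    # "inOrder": "0.74832374"}, parses to free 1.57593193 and used 0.74832374,
    # from which safe_balance derives total = 2.32425567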
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
        :param float amount: how much of the currency you want to trade, in units of the base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
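                # hypothetical example: amount 0.25 and price 200 give
                # cost 50.0, sent as 'amountQuote' so the exchange spends
                # that much quote currency on the market buy/sell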
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional tag/memo for the withdrawal, sent to the API as paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
        :param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
        :param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
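    # The following is a minimal standalone sketch of the signing scheme above,
    # assuming an HMAC-SHA256 hex digest(the 64-character hexadecimal signature
    # that the handle_errors samples below refer to); the secret and path values
    # are hypothetical placeholders:
    #
    #     import hashlib
    #     import hmac
    #     import time
    #
    #     def sign_bitvavo_request(secret, method, url, body=''):
    #         # the auth string is timestamp + HTTP method + versioned url path + raw JSON body
    #         timestamp = str(int(time.time() * 1000))
    #         auth = timestamp + method + url + body
    #         signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()
    #         return timestamp, signature
    #
    #     # usage(hypothetical): ts, sig = sign_bitvavo_request('SECRET', 'GET', '/v2/account')
    #     # sent as the BITVAVO-ACCESS-TIMESTAMP and BITVAVO-ACCESS-SIGNATURE headers
    #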
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
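    # A short worked example of the cost lookup above, using the endpoint costs
    # declared in describe(), e.g. 'ticker/24h': {'cost': 1, 'noMarket': 25}:
    #
    #     # GET ticker/24h with params {'market': 'BTC-EUR'} -> 'market' in params -> cost 1
    #     # GET ticker/24h with params {} -> no 'market' key -> config['noMarket'] -> cost 25
    #
    # so omitting the market parameter is 25x more expensive against the
    # exchange's weighted rate limit budget.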
|
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
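    # A minimal usage sketch for the implementation above, assuming ccxt is
    # installed; public tickers need no API credentials:
    #
    #     import asyncio
    #     import ccxt.async_support as ccxt
    #
    #     async def main():
    #         exchange = ccxt.bitvavo()
    #         try:
    #             ticker = await exchange.fetch_ticker('ETH/BTC')
    #             print(ticker['symbol'], ticker['last'])
    #         finally:
    #             await exchange.close()
    #
    #     asyncio.run(main())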
| 472
| 501
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
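            # reading the taker tiers above as [volume threshold, fee rate] pairs
            # (assumed here to be 30-day trailing volume in the quote currency):
            # a volume of 150000 falls into the [100000, 0.0020] tier, so the
            # taker fee is 0.20%; below 100000 the base taker fee of 0.25% applies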
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
        # Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
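    # A worked example of the two-step conversion above, assuming a market with
    # 'pricePrecision': 5(precisionMode SIGNIFICANT_DIGITS keeps 5 significant
    # figures, then the result is truncated to at most 8 decimal places):
    #
    #     # '8095.33' -> ROUND to 5 significant digits -> '8095.3' -> TRUNCATE(8 decimals) -> '8095.3'
    #     # '0.000123456789' -> '0.00012346' -> '0.00012346'(already within 8 decimals)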
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
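    # For example, the "ADA-BTC" sample above parses into a unified market along
    # these lines(the amount precision of 6 assumes the ADA entry shown under
    # fetchCurrencies below, where "decimals" is 6):
    #
    #     # id: 'ADA-BTC', symbol: 'ADA/BTC', base: 'ADA', quote: 'BTC'
    #     # active: True only while status == 'trading'('halted'/'auction' -> False)
    #     # precision: {'amount': 6, 'price': 5}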
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
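    # A worked example of the 'end' computation above(the workaround for
    # https://github.com/ccxt/ccxt/issues/9227), assuming timeframe '1h' and the
    # default limit of 1440:
    #
    #     # parse_timeframe('1h') -> 3600 seconds per candle
    #     # end = since + 1440 * 3600 * 1000 = since + 5184000000 ms(60 days)
    #
    # i.e. the window is sized so the exchange can return up to 'limit' candles
    # starting at 'since'.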
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
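    # A minimal usage sketch for the stop-order branch above, given an
    # authenticated async exchange instance as in the fetch_ticker sketch
    # earlier; stop types require a 'stopPrice'(or 'triggerAmount') in params,
    # which is forwarded to the API as triggerAmount with triggerType 'price'.
    # All values here are hypothetical:
    #
    #     # stop-loss-limit: sell 0.25 ETH at 180 EUR once the 185 EUR trigger fires
    #     order = await exchange.create_order('ETH/EUR', 'stopLossLimit', 'sell', 0.25, 180, {'stopPrice': 185})
    #     # market buy by quote cost instead of base amount
    #     order = await exchange.create_order('ETH/EUR', 'market', 'buy', None, None, {'cost': 50})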
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
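        """
        fetches information on multiple orders made by the user
        :param str symbol: unified market symbol of the market orders were made in
        :param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """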
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional payment id(sent as 'paymentId') for currencies that require one
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
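# Hypothetical usage sketch(illustrative, not part of the generated file)
# for the optional request fields commented above: 'addWithdrawalFee' adds
# the fee on top of the requested amount instead of deducting it, and tag
# is sent as 'paymentId' for currencies that need one:
#
#     await exchange.withdraw('BTC', 0.1, address, None, {'addWithdrawalFee': True})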
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
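# A minimal standalone sketch of the signing scheme above, assuming the
# standard hmac/hashlib modules(ccxt's self.hmac defaults to SHA256 with a
# hex digest); payload is the JSON body for POST/PUT and '' for GET/DELETE:
#
#     import hashlib
#     import hmac
#     auth = timestamp + method + url + payload  # e.g. '1590505649241' + 'GET' + '/v2/account' + ''
#     signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()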
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
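# Worked example of the cost rule above, using the endpoint configs from
# describe(): 'ticker/24h' is declared as {'cost': 1, 'noMarket': 25}, so
# a request without a 'market' param is weighted 25 while a single-market
# request is weighted 1:
#
#     self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {}, {'cost': 1, 'noMarket': 25})  # 25
#     self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {'market': 'BTC-EUR'}, {'cost': 1, 'noMarket': 25})  # 1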
|
fetch_tickers
|
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
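# Worked sketch of the two-step price handling above: with precisionMode
# set to SIGNIFICANT_DIGITS and a market 'pricePrecision' of 5, a raw
# price is first rounded to 5 significant digits, then truncated to at
# most 8 decimal places:
#
#     8095.3456      -> ROUND to 5 significant digits -> 8095.3
#     0.000123456789 -> ROUND to 5 significant digits -> 0.00012346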
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
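# Illustrative sketch of the caching above: with the default
# options['fetchCurrencies']['expires'] of 1000ms, two calls made within
# the same second hit publicGetAssets only once:
#
#     first = await self.fetch_currencies_from_cache()   # fetches and caches the response
#     second = await self.fetch_currencies_from_cache()  # served from self.options, no request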
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
# MASKED: fetch_tickers function (lines 552-579)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
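# Worked example of the window arithmetic above(see issue 9227): with
# timeframe='1m'(a duration of 60 seconds), since=1590383520000 and the
# default limit of 1440 candles, the requested window spans exactly one day:
#
#     end = 1590383520000 + 1440 * 60 * 1000 = 1590469920000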
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
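# Hypothetical usage sketches(illustrative only) for the branches above:
#
#     # limit order: price and amount are both required
#     await self.create_order('ETH/EUR', 'limit', 'buy', 0.5, 180.0)
#     # market order spending a fixed quote amount via the 'cost' param
#     await self.create_order('ETH/EUR', 'market', 'buy', None, None, {'cost': 50})
#     # stop-loss limit order: 'stopPrice' becomes the 'triggerAmount' field
#     await self.create_order('ETH/EUR', 'stopLossLimit', 'sell', 0.5, 170.0, {'stopPrice': 175.0})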
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
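# Hypothetical usage sketch for the update endpoint above: only the fields
# being changed are sent, e.g. repricing an open limit order, or shrinking
# it via the 'amountRemaining' param:
#
#     await self.edit_order(order_id, 'ETH/EUR', 'limit', 'buy', None, 175.0)
#     await self.edit_order(order_id, 'ETH/EUR', 'limit', 'buy', None, None, {'amountRemaining': 0.25})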
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional payment id(sent as 'paymentId') for currencies that require one
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
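    # A rough illustration of the cost rule above (hypothetical call, not part
    # of the generated file): 'ticker/24h' is configured as
    # {'cost': 1, 'noMarket': 25}, so fetching all tickers without a 'market'
    # param is weighted 25, while fetching a single market is weighted 1.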
|
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, with statistical information calculated over the past 24 hours for each market
        :param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbol
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
| 552
| 579
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
                    '403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
                    '411': BadRequest, # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
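    # Worked example for the two-step conversion above (assuming a market with
    # 'pricePrecision' of 5): a raw price of 8095.337 is first rounded to 5
    # significant digits, giving 8095.3, then truncated to at most 8 decimals,
    # so price_to_precision(symbol, 8095.337) returns '8095.3'.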
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
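    # Cache sketch for the method above: with the default
    # options['fetchCurrencies']['expires'] of 1000 ms, repeated calls within
    # one second reuse the stored publicGetAssets response; a later call
    # refreshes the cache and updates the stored timestamp.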
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, with statistical information calculated over the past 24 hours for each market
        :param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbol
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
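    # Note on the loop above: bitvavo reports a single account-level fee tier,
    # so the same maker/taker pair from privateGetAccount is replicated for
    # every loaded symbol.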
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
        fetches information on open orders with bid (buy) and ask (sell) prices, volumes and other data
        :param str symbol: unified symbol of the market to fetch the order book for
        :param int|None limit: the maximum amount of order book entries to return
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
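    # Worked example for the start/end window above (hypothetical values): with
    # timeframe '1h' (duration 3600 seconds), since = 1590000000000 and the
    # default limit of 1440, request['end'] becomes
    # 1590000000000 + 1440 * 3600 * 1000 = 1595184000000, i.e. a 60-day window.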
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
        :param float amount: how much of the currency you want to trade, in units of the base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
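    # Minimal usage sketches for the order-type branches above (hypothetical
    # values, not part of the generated file):
    #
    #     await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.01, 8000)
    #     await exchange.create_order('BTC/EUR', 'market', 'buy', None, None, {'cost': 100})
    #     await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.01, 7500, {'stopPrice': 7600})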
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
        fetches information on an order made by the user
        :param str id: the order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
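        """
        fetches information on multiple orders made by the user
        :param str symbol: unified market symbol of the market orders were made in
        :param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """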
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional tag for the withdrawal, sent to the exchange as the paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
        :param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
        :param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
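    # Note on the type detection above: withdraw() responses include 'success'
    # and fetchWithdrawals entries include 'address', while deposit entries
    # carry neither key, hence the withdrawal/deposit split.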
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
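    # Standalone sketch of the signing scheme above (hypothetical credentials,
    # not part of the generated file):
    #
    #     import hashlib
    #     import hmac
    #     timestamp = '1590000000000'
    #     auth = timestamp + 'GET' + '/v2/account' + ''  # payload is empty for GET/DELETE
    #     signature = hmac.new(b'secret', auth.encode(), hashlib.sha256).hexdigest()
    #
    # the hex digest is sent as BITVAVO-ACCESS-SIGNATURE alongside the
    # BITVAVO-ACCESS-KEY, BITVAVO-ACCESS-TIMESTAMP and BITVAVO-ACCESS-WINDOW headers.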
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
|
fetch_trades
|
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
                    '213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
                    '214': InvalidOrder, # Price is too detailed
                    '215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
                    '301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
                    '302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
                    '303': AuthenticationError, # Window must be between 100 and 60000 ms.
                    '304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
                    '305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
                    '306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
                    '403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
                    '405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
                    '406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
                    '407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
                    '409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
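    # Usage sketch(illustrative, not part of the generated file): assuming a
    # configured client such as `exchange = ccxt.bitvavo()`, the server clock can
    # be compared against the local clock to detect drift before signing requests:
    #
    #     server_time = await exchange.fetch_time()
    #     drift = exchange.milliseconds() - server_time  # in milliseconds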
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
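    # Usage sketch(illustrative): with markets loaded and a hypothetical symbol,
    # the parsed ticker exposes the unified fields built below:
    #
    #     ticker = await exchange.fetch_ticker('BTC/EUR')
    #     print(ticker['bid'], ticker['ask'], ticker['last'])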
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
# MASKED: fetch_trades function (lines 581-616)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
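    # Windowing sketch for the since/limit handling above: parse_timeframe()
    # returns the candle duration in seconds, so for timeframe '1h' and the
    # default limit of 1440 the requested range becomes
    #
    #     end = since + 1440 * 3600 * 1000  # 1440 hours after `since`, in ms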
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
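    # Usage sketch(illustrative): for tag-based currencies the returned paymentId
    # is surfaced as `tag` and must accompany deposits; the currency code is a
    # hypothetical example:
    #
    #     deposit = await exchange.fetch_deposit_address('XRP')
    #     print(deposit['address'], deposit['tag'])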
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
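    # Usage sketch(illustrative): a stopLossLimit order needs both a price and a
    # stopPrice(mapped to triggerAmount above); all values here are hypothetical:
    #
    #     order = await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell',
    #                                         0.01, 25000, {'stopPrice': 25500})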
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
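    # Usage sketch(illustrative, placeholder values): per the request comment
    # above, `address` may also be an IBAN for EUR withdrawals:
    #
    #     tx = await exchange.withdraw('BTC', 0.1, '1BitcoinAddressPlaceholder')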
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
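    # Illustrative sketch of the signing scheme above, using only the stdlib and
    # placeholder credentials: the message is timestamp + method + url + payload,
    # hex-signed with HMAC-SHA256(cf. error 308, which expects a 64-char digest):
    #
    #     import hmac, hashlib, time
    #     timestamp = str(int(time.time() * 1000))
    #     auth = timestamp + 'GET' + '/v2/account' + ''
    #     sig = hmac.new(b'SECRET', auth.encode(), hashlib.sha256).hexdigest()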
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
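    # Worked example: the describe() config for 'ticker/24h' is
    # {'cost': 1, 'noMarket': 25}, so a request without a market parameter costs
    # 25 weighted units while a single-market request costs 1:
    #
    #     self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {}, {'cost': 1, 'noMarket': 25})  # 25
    #     self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {'market': 'BTC-EUR'}, {'cost': 1, 'noMarket': 25})  # 1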
|
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
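    # Usage sketch(illustrative): paginating forward from a start time with the
    # documented maximum page size; the timestamp value is hypothetical:
    #
    #     trades = await exchange.fetch_trades('BTC/EUR', since=1590382761859, limit=1000)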
| 581
| 616
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
    # the amount field allows up to the number of decimals configured for the market
    # anything exceeding that precision is truncated, not rounded
    return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
    # step 1: round the price to the number of significant digits configured for the market
    price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
    # step 2: truncate the result to a maximum of 8 decimal places
    return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
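# a minimal worked example (not part of the generated file) of the two-step
# precision logic above, using a market like ADA/BTC with pricePrecision == 5:
#
#     exchange.price_to_precision('ADA/BTC', 0.0000059595123)
#     # step 1: ROUND to 5 significant digits -> 0.0000059595
#     # step 2: TRUNCATE to 8 decimal places  -> 0.00000595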
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
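# the expiring-cache pattern used above, in isolation -- a sketch (not part of
# the generated file), assuming a fetcher coroutine and a millisecond clock:
#
#     async def cached_fetch(options, fetcher, now, expires=1000):
#         timestamp = options.get('timestamp')
#         if (timestamp is None) or ((now - timestamp) > expires):
#             options['response'] = await fetcher()
#             options['timestamp'] = now
#         return options['response']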
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
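# worked example (not part of the generated file) of the windowing above:
# parse_timeframe('1h') is 3600 seconds, so with the default limit of 1440
# request['end'] = since + 1440 * 3600 * 1000, a 60-day window; with limit=24
# the window is exactly one day:
#
#     since = 1590383700000
#     end = since + 24 * 3600 * 1000  # 1590470100000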
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
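# example (not part of the generated file): a raw balance entry such as
# {"symbol": "BTC", "available": "1.5", "inOrder": "0.5"} parses to
# account['free'] == '1.5' and account['used'] == '0.5'; safe_balance()
# then derives the total, 2.0, from free + used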
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
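# usage sketch (not part of the generated file); the currency code is
# illustrative:
#
#     info = await exchange.fetch_deposit_address('XRP')
#     # info['address'] is the on-chain address, info['tag'] carries the
#     # paymentId (destination tag) when the network requires one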
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
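# usage sketches (not part of the generated file) for the branches above;
# symbols, amounts and prices are illustrative:
#
#     # plain limit order
#     await exchange.create_order('ETH/EUR', 'limit', 'buy', 0.5, 180.0)
#     # market buy by quote cost instead of base amount
#     await exchange.create_order('ETH/EUR', 'market', 'buy', None, None, {'cost': 100})
#     # stop-loss limit order, which requires a stopPrice in params
#     await exchange.create_order('ETH/EUR', 'stopLossLimit', 'sell', 0.5, 170.0, {'stopPrice': 175.0})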
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag/memo for the withdrawal, sent to the exchange as the paymentId parameter
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
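# a standalone sketch (not part of the generated file) of the signature scheme
# implemented in sign() above, using only the standard library; `url` is the
# versioned request path including any query string, `body` the raw JSON payload:
#
#     import hashlib
#     import hmac
#     import time
#
#     def bitvavo_signature(secret, method, url, body=''):
#         timestamp = str(int(time.time() * 1000))
#         auth = timestamp + method + url + body
#         signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()
#         return timestamp, signature  # a 64-character hexadecimal string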
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
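# worked example (not part of the generated file): with the config declared for
# 'ticker/24h' above, {'cost': 1, 'noMarket': 25}, a request without a market
# parameter costs 25 weighted rate-limit units, while one with a market costs 1:
#
#     exchange.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {}, {'cost': 1, 'noMarket': 25})  # 25
#     exchange.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {'market': 'BTC-EUR'}, {'cost': 1, 'noMarket': 25})  # 1
#
# and a minimal end-to-end usage sketch (also not part of the generated file);
# network access is required and the symbol is illustrative:
#
#     import asyncio
#     import ccxt.async_support as ccxt
#
#     async def main():
#         exchange = ccxt.bitvavo()
#         try:
#             ticker = await exchange.fetch_ticker('BTC/EUR')
#             print(ticker['last'])
#         finally:
#             await exchange.close()
#
#     asyncio.run(main())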
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for self asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete self operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
                    '411': BadRequest, # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
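    # Illustrative walk-through of the two precision stages above, for a
    # hypothetical market whose pricePrecision is 5(significant digits):
    #
    #     price_to_precision(symbol, 8097.123456)
    #         step 1: ROUND to 5 significant digits -> 8097.1
    #         step 2: TRUNCATE to 8 decimal places  -> 8097.1(no-op here)
    #
    #     price_to_precision(symbol, 0.0000059595123)
    #         step 1: ROUND to 5 significant digits -> 0.0000059595
    #         step 2: TRUNCATE to 8 decimal places  -> 0.00000595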
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
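    # Note on the mixed precision modes used above: 'price' carries a
    # significant-digits count(pricePrecision, interpreted under
    # precisionMode = SIGNIFICANT_DIGITS), while 'amount' carries a
    # decimal-places count taken from the base currency's 'decimals' field.
    # For example, a market with pricePrecision 5 and a base currency with
    # 6 decimals accepts prices like 8097.1 and amounts down to 0.000001.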
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
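    # Illustrative timeline for the expiry check above, with the default
    # 'expires' of 1000ms from self.options['fetchCurrencies']:
    #
    #     t=0ms     first call  -> no timestamp stored yet, fetch and cache
    #     t=600ms   second call -> 600 - 0 <= 1000, reuse the cached response
    #     t=1500ms  third call  -> 1500 - 0 > 1000, fetch again and re-cache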
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
    async def fetch_trading_fees(self, params={}):
        """
        fetch the trading fees for multiple markets
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
        """
        await self.load_markets()
        response = await self.privateGetAccount(params)
        #
        # {
        #     "fees": {
        #         "taker": "0.0025",
        #         "maker": "0.0015",
        #         "volume": "10000.00"
        #     }
        # }
        #
        fees = self.safe_value(response, 'fees')
        maker = self.safe_number(fees, 'maker')
        taker = self.safe_number(fees, 'taker')
        result = {}
        for i in range(0, len(self.symbols)):
            symbol = self.symbols[i]
            result[symbol] = {
                'info': response,
                'symbol': symbol,
                'maker': maker,
                'taker': taker,
                'percentage': True,
                'tierBased': True,
            }
        return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
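    # Worked example of the request window computed above(see
    # https://github.com/ccxt/ccxt/issues/9227): for timeframe '1h'
    # (duration 3600 seconds), since = 1590000000000 and the default
    # limit of 1440 candles, the request becomes:
    #
    #     request['start'] = 1590000000000
    #     request['end']   = 1590000000000 + 1440 * 3600 * 1000
    #                      = 1595184000000(exactly 60 days later)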
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
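    # Illustrative request payloads produced by the branches above(all
    # values are hypothetical; precision formatting depends on the market):
    #
    #     limit buy:
    #         {'market': 'BTC-EUR', 'side': 'buy', 'orderType': 'limit',
    #          'amount': '0.1', 'price': '8000'}
    #
    #     market buy by cost, i.e. params={'cost': 100}:
    #         {'market': 'BTC-EUR', 'side': 'buy', 'orderType': 'market',
    #          'amountQuote': '100'}
    #
    #     stopLossLimit sell with params={'stopPrice': 7900}:
    #         {'market': 'BTC-EUR', 'side': 'sell', 'orderType': 'stopLossLimit',
    #          'price': '7890', 'triggerAmount': '7900', 'triggerType': 'price',
    #          'amount': '0.1'}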
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
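    # Illustrative call for the update branch above, lowering only the
    # unfilled remainder of an existing order(hypothetical order id):
    #
    #     await exchange.edit_order('af76d6ce-...', 'ETH/EUR', None, None,
    #                               params={'amountRemaining': '0.1'})
    #
    # sends PUT /v2/order with {'orderId': ..., 'market': 'ETH-EUR',
    # 'amountRemaining': '0.1'}; calling with no changes at all raises
    # ArgumentsRequired instead.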
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
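    # Illustrative type inference for the heuristic above: the withdraw()
    # response contains a 'success' key and fetchWithdrawals rows contain an
    # 'address' key, so both are typed 'withdrawal'; fetchDeposits rows
    # contain neither key and fall through to 'deposit'.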
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
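    # Illustrative pre-sign string and signature for the scheme above
    # (hypothetical credentials; self.hmac defaults to SHA256 and returns a
    # hex digest, matching the 64-character requirement of error 308):
    #
    #     timestamp = '1590379519148'
    #     auth      = '1590379519148' + 'GET' + '/v2/account' + ''
    #               = '1590379519148GET/v2/account'
    #     signature = HMAC-SHA256(secret, auth)  # 64-char hexadecimal string
    #
    # For POST and PUT requests the JSON body replaces the empty payload at
    # the end of the string.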
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
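    # Illustrative costs under the rule above, using the endpoint weights
    # from describe(): 'ticker/24h' is declared as {'cost': 1, 'noMarket': 25},
    # so a fetchTickers() call without a 'market' in params is weighted 25,
    # while the same endpoint scoped to a single market is weighted 1.
    # Endpoints without a 'noMarket' entry always fall through to their
    # plain 'cost'(default 1).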
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for self asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete self operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return self error."}
'300': AuthenticationError, # Authentication is required for self endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from self IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for self.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for self asset are not available at self time.
'402': PermissionDenied, # You need to verify your identitiy before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete self operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete self operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for self asset are not available at self time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
        # rounding mode 0 is TRUNCATE; the named constant is used here for clarity
        return self.decimal_to_precision(fee, TRUNCATE, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
        # NOTE: the reference below is to the Bitfinex docs and appears to be carried over
        # https://docs.bitfinex.com/docs/introduction#amount-precision
        # The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # NOTE: the reference below is to the Bitfinex docs and appears to be carried over
        # https://docs.bitfinex.com/docs/introduction#price-precision
        # The precision level of all trading prices is based on significant figures.
        # All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
        # Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
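    # Illustrative walk-through of the two-step rounding above(editorial sketch,
    # not part of the exchange API): for a market with pricePrecision 5,
    # price_to_precision(symbol, 8097.412345) first rounds to 5 significant
    # digits under SIGNIFICANT_DIGITS mode('8097.4'), then truncates to at most
    # 8 decimal places, so '8097.4' is returned.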
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
        # currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
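    # Cache shape sketch(illustrative, the timestamp is a sample value): after the
    # first call self.options['fetchCurrencies'] looks like
    # {'expires': 1000, 'response': [...], 'timestamp': 1590379519148},
    # and publicGetAssets is only requested again once the cached 'timestamp' is
    # older than 'expires' milliseconds.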
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbol
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market symbol
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
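    # Worked example of the candle window computed above(illustrative numbers):
    # with timeframe='1h'(duration 3600 seconds), since=1590000000000 and the
    # default limit of 1440, request['end'] = 1590000000000 + 1440 * 3600 * 1000
    # = 1595184000000, i.e. exactly 60 days after 'start'.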
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
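    # Usage sketch(editorial example, assumes an authenticated client; KEY and
    # SECRET are placeholders):
    #
    #     import ccxt.async_support as ccxt
    #     exchange = ccxt.bitvavo({'apiKey': KEY, 'secret': SECRET})
    #     await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.001, 20000)
    #     await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.001, 19000, {'stopPrice': 19100})
    #
    # For market orders either the 'amount' argument(base currency) or a
    # 'cost'/'amountQuote' parameter(quote currency) may be supplied, as handled above.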
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
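        """
        edit a trade order
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float|None amount: how much of the currency you want to trade in units of the base currency
        :param float|None price: the price at which the order is to be fulfilled, in units of the quote currency
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """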
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
        fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
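        """
        fetches information on multiple orders made by the user
        :param str symbol: unified market symbol of the market the orders were made in
        :param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """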
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional tag for the withdrawal, sent as the paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
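        # withdraw() responses carry a 'success' key and withdrawal history rows
        # carry an 'address' key(see the samples above), while deposit rows carry
        # neither, so the presence of either key classifies the transaction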
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
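    # Standalone sketch of the signature scheme implemented above(editorial
    # example using only the standard library; SECRET is a placeholder):
    #
    #     import hmac, hashlib, time
    #     timestamp = str(int(time.time() * 1000))
    #     message = timestamp + 'GET' + '/v2/account' + ''  # timestamp + method + url + payload
    #     signature = hmac.new(SECRET.encode(), message.encode(), hashlib.sha256).hexdigest()
    #
    # The hex digest is sent as BITVAVO-ACCESS-SIGNATURE alongside the
    # BITVAVO-ACCESS-KEY, BITVAVO-ACCESS-TIMESTAMP and BITVAVO-ACCESS-WINDOW headers.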
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
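    # Example(from the rate limit config above): publicGetTicker24h is declared
    # as {'cost': 1, 'noMarket': 25}, so fetching a single market's ticker costs
    # 1 unit of request weight, while omitting the 'market' parameter(all
    # tickers) costs 25.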
|
fetch_order_book
|
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for self asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete self operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return self error."}
'300': AuthenticationError, # Authentication is required for self endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from self IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for self.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for self asset are not available at self time.
'402': PermissionDenied, # You need to verify your identitiy before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete self operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete self operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for self asset are not available at self time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding self will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
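# The method above is a small time-to-live cache: the last response is reused
# until 'expires' milliseconds have passed(1000 by default, configurable via
# self.options['fetchCurrencies']['expires']). A standalone sketch of the same
# pattern, using hypothetical names(fetch_fresh, _cache):
#
#     import time
#     _cache = {'response': None, 'timestamp': None}
#     def cached_fetch(expires=1000):
#         now = int(time.time() * 1000)
#         if (_cache['timestamp'] is None) or ((now - _cache['timestamp']) > expires):
#             _cache['response'] = fetch_fresh()  # hypothetical network call
#             _cache['timestamp'] = now
#         return _cache['response']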
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
# MASKED: fetch_order_book function (lines 745-778)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
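# A worked example of the 'end' computation above(see issue 9227): for
# timeframe '1h', parse_timeframe returns a duration of 3600 seconds, so with
# an illustrative since=1590000000000 and the default limit of 1440 candles:
#
#     end = since + limit * duration * 1000
#         = 1590000000000 + 1440 * 3600 * 1000
#         = 1595184000000  # 1440 hours(60 days) after 'since'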
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
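# A few hedged usage sketches for the branches above; the exchange instance,
# symbols and numbers are illustrative only:
#
#     # limit order: price and amount are both required
#     await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.01, 8000)
#     # market buy by quote cost: routed through the 'amountQuote' field
#     await exchange.create_order('BTC/EUR', 'market', 'buy', None, None, {'cost': 100})
#     # stop-loss limit order: needs a stopPrice, sent to the API as 'triggerAmount'
#     await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.01, 7900, {'stopPrice': 7950})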
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
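"""
edit an existing trade order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float|None amount: how much of the currency you want to trade in units of the base currency
:param float|None price: the price at which the order is to be fulfilled, in units of the quote currency
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""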
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when a symbol is specified
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: the order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
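"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""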
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled, currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional address tag, sent to the exchange as the paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
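# The type detection above is a heuristic over the shapes documented in the
# comments: withdraw() responses carry 'success' and withdrawal history rows
# carry 'address', while the deposit history rows shown above carry neither.
# Illustrative calls(the exchange instance is hypothetical):
#
#     exchange.parse_transaction({'success': True, 'symbol': 'BTC', 'amount': '1.5'})['type']  # 'withdrawal'
#     exchange.parse_transaction({'timestamp': 1590492401000, 'symbol': 'ETH', 'amount': '0.2'})['type']  # 'deposit'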
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
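# The private-endpoint signature above is plain HMAC-SHA256 over
# timestamp + method + url + payload, hex-encoded. A standalone sketch with
# the standard library(the key and all values are placeholders):
#
#     import hmac, hashlib
#     timestamp = '1590379519148'
#     auth = timestamp + 'GET' + '/v2/account' + ''  # empty payload for GET/DELETE
#     signature = hmac.new(b'secret', auth.encode(), hashlib.sha256).hexdigest()
#     # sent as BITVAVO-ACCESS-SIGNATURE alongside BITVAVO-ACCESS-TIMESTAMP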
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
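# An illustrative trip through the dispatch above: a response body like
# {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
# is matched against self.exceptions and surfaces as a typed exception:
#
#     try:
#         await exchange.create_order('BTC/EUR', 'limit', 'buy', 1000, 8000)  # hypothetical oversized order
#     except InsufficientFunds:
#         pass  # errorCode '216' resolved via self.exceptions['exact']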
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
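# A worked example with the 'ticker/24h' config from describe(),
# {'cost': 1, 'noMarket': 25}: a request that passes a 'market' param costs
# 1 weighted unit, while one without costs 25:
#
#     self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {'market': 'BTC-EUR'}, {'cost': 1, 'noMarket': 25})  # 1
#     self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {}, {'cost': 1, 'noMarket': 25})  # 25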
|
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
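# A hedged usage sketch(the exchange instance, symbol and depth are illustrative):
#
#     orderbook = await exchange.fetch_order_book('BTC/EUR', 10)
#     best_bid = orderbook['bids'][0]  # [price, amount] of the highest bid
#     best_ask = orderbook['asks'][0]  # [price, amount] of the lowest ask
#     sequence = orderbook['nonce']    # exchange-side book sequence number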
| 745
| 778
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
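# Reading the tiers above: each row is [minimum 30-day trailing volume, fee rate],
# with volume presumably denominated in EUR. As a worked example, a trailing
# volume of 300000 falls in the 250000 tier, so a 1000 EUR trade would cost about:
#
#     1000 * 0.0016  # = 1.60 as taker(maker: 1000 * 0.0008 = 0.80)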
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
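# Note on the configuration above: 'rateLimit': 60 means one weighted request
# unit every 60 ms, which matches the documented budget of 1000 weighted
# requests per minute(60000 ms / 1000 requests = 60 ms each); per-endpoint
# weights are given in the 'api' cost tables.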
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submitted with a precision greater than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
        #     taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
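        # illustrative output for the sample candle above (safe_number turns the
        # string fields into floats):
        # [1590383700000, 8088.5, 8088.5, 8088.5, 8088.5, 0.04788623]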
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
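            # worked example with assumed values: timeframe='1h' and limit=100 give
            # duration=3600, so request['end'] = since + 100 * 3600 * 1000,
            # e.g. since=1590383700000 -> end=1590743700000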
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
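            # e.g. {"symbol": "BTC", "available": "1.57593193", "inOrder": "0.74832374"}
            # becomes free=1.57593193 and used=0.74832374, and safe_balance
            # derives total = free + used = 2.32425567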
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
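            # hypothetical numbers: a market buy of amount=0.1 at price=200 gives
            # cost=20, which is truncated to the market's price precision and sent
            # as amountQuote='20'; without a price or an explicit cost the
            # request['amount'] branch above is used instead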
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
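        # e.g. parse_order_status('awaitingTrigger') -> 'open' and
        # parse_order_status('canceledIOC') -> 'canceled'; statuses missing from
        # the map above are passed through unchanged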
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
        #     "disableMarketProtection":false,
        #     "timeInForce": "GTC",
        #     "postOnly": true,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an address tag, forwarded to the exchange as the paymentId parameter
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
        #     "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
        #     "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
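        # e.g. the withdraw() response above carries 'success' and fetchWithdrawals
        # rows carry 'address', so both are typed 'withdrawal', while the documented
        # fetchDeposits rows contain neither key and fall through to 'deposit'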
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
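            # illustrative preimage, assuming a private GET of /v2/balance with an
            # empty payload at timestamp 1590000000000:
            # auth = '1590000000000' + 'GET' + '/v2/balance' + '' = '1590000000000GET/v2/balance'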
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
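            # e.g. {"errorCode":216,...} raises InsufficientFunds through the
            # 'exact' map, while a message like "symbol parameter is invalid" is
            # caught first by the 'broad' matcher and raises BadSymbol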
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
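        # e.g. publicGetTicker24h is configured as {'cost': 1, 'noMarket': 25}, so
        # fetching all tickers without a 'market' param costs 25 weighted requests,
        # while a single-market request costs only 1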
|
fetch_ohlcv
|
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                '401': ExchangeError, # Deposits for this asset are not available at this time.
                '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                '410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
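        # worked example with assumed values: with pricePrecision=5 significant
        # digits, 8097.4321 rounds to '8097.4', and the final truncation to
        # 8 decimal places leaves it unchanged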
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
        #     taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
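# Worked example (illustrative): the taker flag above maps directly onto
# takerOrMaker, so a private fill with "taker":false parses as a maker trade,
# while public trades (no taker field) leave takerOrMaker as None:
#
#     fill = {'id': 'x', 'timestamp': 1590505649245, 'amount': '0.1', 'price': '100', 'side': 'buy', 'taker': False}
#     # exchange.parse_trade(fill)['takerOrMaker'] == 'maker'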
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
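# Usage sketch (illustrative): Bitvavo reports one account-level fee pair, so
# the same maker/taker rates are repeated for every loaded symbol:
#
#     fees = await exchange.fetch_trading_fees()
#     # fees['BTC/EUR']['taker'] == fees['ETH/EUR']['taker'], e.g. 0.0025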
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market symbol
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
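# Usage sketch (illustrative, values taken from the documented response above):
# bids/asks come back as [price, amount] rows and the exchange nonce is
# attached for sequencing:
#
#     orderbook = await exchange.fetch_order_book('BTC/EUR', limit=2)
#     # orderbook['bids'][0] -> [8097.4, 0.6229099]
#     # orderbook['nonce']   -> 35883831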
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
# MASKED: fetch_ohlcv function (lines 800-836)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of the currency you want to trade, in units of the base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
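# Worked example (illustrative values): a market buy with an explicit price
# sends a quote-denominated 'amountQuote' computed as cost = amount * price,
# while a limit order sends 'price' and 'amount' formatted to market precision:
#
#     await exchange.create_order('ETH/EUR', 'market', 'buy', 0.5, 200.0)
#     # -> request contains 'amountQuote' of roughly '100' (0.5 * 200, truncated)
#     await exchange.create_order('ETH/EUR', 'limit', 'buy', 0.5, 200.0)
#     # -> request contains both 'price' and 'amount'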
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
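# Usage sketch (order id taken from the documented examples above):
# amountRemaining is read from params, so an open order can be shrunk without
# restating its full amount; type and side are accepted but unused here:
#
#     await exchange.edit_order('af76d6ce-9f7c-4006-b715-bb5d430652d0', 'ETH/EUR',
#                               None, None, params={'amountRemaining': 0.75})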
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag or payment id to attach to the withdrawal, sent as paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
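# Usage sketch (hypothetical address): tag, when given, is sent as paymentId,
# and the optional flags documented in the request comments above can be passed
# through params:
#
#     tx = await exchange.withdraw('BTC', 0.1, '1ExampleBitcoinAddress',
#                                  params={'addWithdrawalFee': True})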
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
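# The type heuristic above keys off response shape: withdraw() responses carry
# 'success' and withdrawal history entries carry 'address', while deposit
# history entries (as documented above) carry neither, e.g.:
#
#     # parse_transaction({'success': True, 'symbol': 'BTC', 'amount': '1.5'})['type'] == 'withdrawal'
#     # parse_transaction({'timestamp': 1590492401000, 'symbol': 'ETH', 'amount': '0.2'})['type'] == 'deposit'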
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
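# A standalone signing sketch (not part of the original file; uses only the
# Python standard library): it mirrors what sign() builds above, where the auth
# string is timestamp + method + url + payload and self.hmac() defaults to
# HMAC-SHA256 with a hex digest, consistent with the 64-character hexadecimal
# signature mentioned in error 308 below:
#
#     import hashlib
#     import hmac
#     import time
#
#     def bitvavo_signature(secret, method, url, payload=''):
#         # url must include the version prefix, e.g. '/v2/account'
#         timestamp = str(int(time.time() * 1000))
#         auth = timestamp + method + url + payload
#         signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()
#         return timestamp, signature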
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
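# Illustrative mapping (based on the exceptions config in describe()): a
# response like {"errorCode":205,"error":"symbol parameter is invalid."} is
# matched first by the 'broad' substring check and raises BadSymbol, while
# {"errorCode":216,"error":"You do not have sufficient balance ..."} falls
# through to the 'exact' errorCode lookup and raises InsufficientFunds.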
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
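# Illustrative: with the 'ticker/24h' config ({'cost': 1, 'noMarket': 25}) from
# describe(), publicGetTicker24h({}) is weighted 25 because no 'market' param
# is present, while publicGetTicker24h({'market': 'BTC-EUR'}) costs 1.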
|
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
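# Worked example (illustrative values): with timeframe='1h' the parsed duration
# is 3600 seconds, so since=1590000000000 and limit=24 produce
# request['end'] = 1590000000000 + 24 * 3600 * 1000 = 1590086400000,
# i.e. a window covering exactly 24 hourly candles.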
| 800
| 836
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100,000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding self will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
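# Illustrative (hypothetical market with pricePrecision 5): price_to_precision
# first rounds to 5 significant digits under SIGNIFICANT_DIGITS mode, turning
# 8097.4567 into 8097.5, then truncates the result to at most 8 decimals.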
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
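# Cache sketch (illustrative): with the default options entry
# {'fetchCurrencies': {'expires': 1000}}, two calls within one second trigger a
# single publicGetAssets request; the second call returns the cached 'response'
# stored in self.options['fetchCurrencies'] until it expires.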
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market symbol
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
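    # A worked example of the since/limit window above(a sketch with example
    # values): for timeframe='1h' parse_timeframe returns 3600 seconds, so
    # with the default limit of 1440 candles the computed end is:
    #
    #     since = 1590000000000            # ms
    #     duration = 3600                  # seconds per '1h' candle
    #     end = since + 1440 * duration * 1000
    #     # end == 1595184000000, i.e. since + 60 days in milliseconds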
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
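    # Usage sketch(credentials are placeholders):
    #
    #     exchange = ccxt.bitvavo({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    #     balance = await exchange.fetch_balance()
    #     print(balance['BTC']['free'], balance['BTC']['used'])
    #     # 'available' maps to free, 'inOrder' maps to used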
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
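    # Usage sketch for the stop branch above(symbol, amounts and prices are
    # illustrative): a stopLossLimit order passes the trigger price through
    # params, which is converted to the exchange-specific triggerAmount:
    #
    #     order = await exchange.create_order(
    #         'ETH/EUR', 'stopLossLimit', 'sell', 0.25, 180.0,
    #         {'stopPrice': 182.5},
    #     )
    #     print(order['id'], order['status'])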
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
        #         "disableMarketProtection":false,
        #         "timeInForce": "GTC",
        #         "postOnly": true,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
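    # Usage sketch(address is a placeholder; paymentId is only sent when a
    # tag is supplied):
    #
    #     tx = await exchange.withdraw('BTC', 0.1, '1ExampleBitcoinAddress')
    #     print(tx['status'])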
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
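    # The type heuristic above keys off the response shape shown in the
    # samples: withdraw() responses carry 'success' and fetchWithdrawals
    # entries carry 'address', while deposit entries carry neither, e.g.:
    #
    #     {"success": True, "symbol": "BTC", ...}   # -> type == 'withdrawal'
    #     {"address": "BitcoinAddress", ...}        # -> type == 'withdrawal'
    #     {"timestamp": 1590492401000, ...}         # -> type == 'deposit'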
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
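    # A standalone sketch of the signing scheme above using only the standard
    # library(key values are placeholders):
    #
    #     import hashlib
    #     import hmac
    #     import time
    #
    #     secret = b'YOUR_SECRET'
    #     timestamp = str(int(time.time() * 1000))
    #     payload = ''  # JSON body for POST/PUT, empty for GET/DELETE
    #     auth = timestamp + 'GET' + '/v2/account' + payload
    #     signature = hmac.new(secret, auth.encode(), hashlib.sha256).hexdigest()
    #     # sent as BITVAVO-ACCESS-SIGNATURE, a 64-character hex string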
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
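    # Example of the cost lookup above(config values match the 'ordersOpen'
    # entry in the 'api' section of describe()): privateGetOrdersOpen is
    # configured as {'cost': 1, 'noMarket': 25}, so:
    #
    #     cost = exchange.calculate_rate_limiter_cost(
    #         'private', 'GET', 'ordersOpen', {}, {'cost': 1, 'noMarket': 25})
    #     # cost == 25; with {'market': 'BTC-EUR'} in params it would be 1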
|
fetch_balance
|
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded,  # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded,  # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder,  # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds,  # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound,  # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError,  # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied,  # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest,  # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError,  # Deposits for this asset are not available at this time.
                    '402': PermissionDenied,  # You need to verify your identity before you can deposit and withdraw digital assets.
                    '403': PermissionDenied,  # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance,  # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
                    '405': ExchangeError,  # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
                    '406': BadRequest,  # {"errorCode":406,"error":"Your withdrawal is too small."}
                    '407': ExchangeError,  # Internal transfer is not possible.
                    '408': InsufficientFunds,  # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
                    '409': InvalidAddress,  # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError,  # Withdrawals for this asset are not available at this time.
                    '411': BadRequest,  # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
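    # Illustrative behaviour of the precision primitives used above(digit
    # counts are example values):
    #
    #     exchange.decimal_to_precision('1234.5678', ROUND, 5, SIGNIFICANT_DIGITS)
    #     # -> '1234.6'
    #     exchange.decimal_to_precision('0.123456789', TRUNCATE, 8, DECIMAL_PLACES)
    #     # -> '0.12345678'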
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
        # currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
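    # The expiry check above as a generic sketch(standalone, not ccxt API):
    # refetch when no timestamp is cached or the entry is older than 'expires'.
    #
    #     import time
    #
    #     _cache = {}
    #
    #     def cached(fetch, expires_ms=1000):
    #         now = int(time.time() * 1000)
    #         ts = _cache.get('timestamp')
    #         if ts is None or (now - ts) > expires_ms:
    #             _cache.update(response=fetch(), timestamp=now)
    #         return _cache['response']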
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
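    # Usage sketch(symbol and window are illustrative):
    #
    #     since = exchange.parse8601('2020-05-25T00:00:00Z')
    #     trades = await exchange.fetch_trades('BTC/EUR', since=since, limit=100)
    #     print(len(trades), trades[0]['price'])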
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
# MASKED: fetch_balance function (lines 854-871)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
"""
edit an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float|None amount: how much of the currency you want to trade in units of the base currency
:param float|None price: the price for the order, in units of the quote currency
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: the order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market the orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
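#
# a minimal pagination sketch(an illustration, not part of the generated
# class), assuming an authenticated ccxt.async_support.bitvavo instance
# named "exchange"; it walks forward through the private trade history in
# pages of up to 1000 trades:
#
#     since = exchange.parse8601('2020-01-01T00:00:00Z')
#     all_trades = []
#     while True:
#         trades = await exchange.fetch_my_trades('ETH/EUR', since, 1000)
#         if not trades:
#             break
#         all_trades.extend(trades)
#         since = trades[-1]['timestamp'] + 1
#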
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag for the withdrawal, sent as the paymentId request field
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
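#
# a minimal usage sketch(illustrative only), assuming an authenticated
# exchange instance and a placeholder destination address; for currencies
# that require a tag, the tag is forwarded as the paymentId request field:
#
#     tx = await exchange.withdraw('BTC', 0.01, '1BitcoinAddressPlaceholder')
#     print(tx['currency'], tx['amount'])
#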
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
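# withdraw() responses carry a 'success' field and withdrawal history rows
# carry an 'address' field, while deposit history rows(see the samples
# above) contain neither, hence the type detection below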
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
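#
# what the signature covers, as a sketch with placeholder values: for a
# private GET of /v2/balance at timestamp 1590000000000 with no body, the
# string signed with HMAC-SHA256 using the API secret would be
#
#     '1590000000000' + 'GET' + '/v2/balance' + ''
#
# i.e. timestamp + method + url(including the version prefix and any query
# string) + raw JSON body(empty for GET and DELETE requests)
#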
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
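#
# for example, with the 'ordersOpen' endpoint configured in describe() as
# {'cost': 1, 'noMarket': 25}, a request that omits the market parameter
# is weighted 25, while a request for a single market is weighted 1
#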
|
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
| 854
| 871
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# amounts are truncated (not rounded) to the market's amount precision,
# which bitvavo expresses as a number of decimal places
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# the price is first rounded to the market's pricePrecision, which bitvavo
# expresses in significant digits (self.precisionMode is SIGNIFICANT_DIGITS),
# and the result is then truncated to at most 8 decimal places
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
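# a worked sketch (illustrative values): with a pricePrecision of 5
# significant digits, 8097.53 rounds to 8097.5; 0.0000123456 rounds to
# 0.000012346, which has 9 decimals, so the final TRUNCATE step caps it
# at 8 decimals, giving 0.00001234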
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
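#
# a minimal usage sketch (illustrative), assuming an exchange instance;
# the limit maps to the depth request parameter, and the exchange-provided
# nonce can be used to sequence successive order book snapshots:
#
#     book = await exchange.fetch_order_book('BTC/EUR', 25)
#     best_bid, best_ask = book['bids'][0], book['asks'][0]
#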
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
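#
# a worked sketch of the request window above (illustrative numbers): for
# timeframe '1h' (duration 3600 seconds), since = 1590000000000 and the
# default limit of 1440, the computed end is
#
#     1590000000000 + 1440 * 3600 * 1000 = 1595184000000
#
# i.e. exactly 1440 hourly candles after the start timestamp
#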
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
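#
# a minimal usage sketch (illustrative), assuming an authenticated instance:
#
#     balance = await exchange.fetch_balance()
#     print(balance['BTC']['free'], balance['BTC']['used'])
#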
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
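#
# minimal usage sketches (illustrative values, placeholder symbol/amounts),
# assuming an authenticated exchange instance:
#
#     # limit order
#     order = await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.001, 25000)
#     # market buy by quote cost, routed through the amountQuote request field
#     order = await exchange.create_order('BTC/EUR', 'market', 'buy', None, None, {'cost': 50})
#     # stop-loss limit order, triggered at the stopPrice param
#     order = await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.001, 24000, {'stopPrice': 24500})
#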
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
"""
edit an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float|None amount: how much of the currency you want to trade in units of the base currency
:param float|None price: the price for the order, in units of the quote currency
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
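#
# a minimal usage sketch (illustrative, order_id is a placeholder): besides
# price and amount, the remaining unfilled quantity can be updated through
# the amountRemaining param:
#
#     order = await exchange.edit_order(order_id, 'BTC/EUR', 'limit', 'buy', None, 26000, {'amountRemaining': 0.0005})
#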
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market the orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
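# Pagination sketch: 'since' maps to the 'start' query parameter and
# 'limit' caps the page size (default 500, max 1000), so a history can be
# walked forward page by page:
#
#     orders = await exchange.fetch_orders('ETH/EUR', 1590505649241, 1000)
#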
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag/memo for the withdrawal, forwarded to bitvavo as the paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
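# Usage sketch (hypothetical address and amount): a destination tag is
# forwarded as bitvavo's paymentId field, and the optional
# 'addWithdrawalFee' param charges the fee on top instead of subtracting
# it from the amount:
#
#     tx = await exchange.withdraw('XRP', 25.0, 'rEXAMPLEADDRESS', '10002653', {'addWithdrawalFee': True})
#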
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
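# Type-detection sketch: withdraw and fetchWithdrawals payloads carry a
# 'success' or 'address' field, so the heuristic above classifies
# {"success": True, "symbol": "BTC", "amount": "1.5"} as a 'withdrawal',
# while a deposit-history entry without either field parses as a 'deposit'.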
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
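# Standalone signing sketch (hypothetical secret): the pre-sign payload is
# timestamp + method + url + body, HMAC-SHA256 hex-digested with the API
# secret, matching the auth string built above:
#
#     import hmac, hashlib, time
#     timestamp = str(int(time.time() * 1000))
#     auth = timestamp + 'GET' + '/v2/account' + ''
#     signature = hmac.new(b'YOUR_SECRET', auth.encode(), hashlib.sha256).hexdigest()
#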
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
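# Mapping sketch: the error text is first checked against the 'broad'
# patterns, then the errorCode against the 'exact' table, so
# {"errorCode":216,"error":"You do not have sufficient balance to complete
# this operation."} raises InsufficientFunds, and anything unrecognized
# falls through to the generic ExchangeError above.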
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
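# Cost sketch: privateGetOrdersOpen is declared as {'cost': 1, 'noMarket': 25}
# in the API map, so fetchOpenOrders() without a 'market' param consumes 25
# rate-limit units versus 1 when a market is specified.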
|
fetch_deposit_address
|
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding self will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
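# Worked example (assuming a market with pricePrecision 5, i.e. 5
# significant digits under SIGNIFICANT_DIGITS mode): 8097.4321 rounds to
# 8097.4, which the final TRUNCATE to 8 decimal places leaves unchanged;
# 0.000123456789 rounds to 0.00012346 at 5 significant digits, which also
# fits within 8 decimals.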
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
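# Cache sketch: with the default options {'fetchCurrencies': {'expires': 1000}},
# a second call within one second of the stored timestamp returns the
# cached response instead of hitting publicGetAssets again.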
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical summary calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical summaries calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
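# Usage sketch: bitvavo fees are account-level, so the same maker/taker
# rates are fanned out to every loaded symbol:
#
#     fees = await exchange.fetch_trading_fees()
#     print(fees['BTC/EUR']['maker'], fees['BTC/EUR']['taker'])
#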
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid (buy) and ask (sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested symbol
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
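# Usage sketch: the 'depth' request parameter trims the book server-side:
#
#     orderbook = await exchange.fetch_order_book('BTC/EUR', 25)
#     best_bid, best_ask = orderbook['bids'][0], orderbook['asks'][0]
#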
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
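# Window sketch: for timeframe '1h' (duration 3600 seconds), since =
# 1590000000000 and the default limit of 1440, the code above requests
# end = 1590000000000 + 1440 * 3600 * 1000 = 1595184000000 ms.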
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
# MASKED: fetch_deposit_address function (lines 873-901)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market the orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional payment id(memo) to attach to the withdrawal
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
        :param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
        :param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
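    # A hedged sketch of the type inference above: withdraw() responses carry
    # a "success" field and fetchWithdrawals entries carry an "address" field,
    # so either marks a withdrawal, and everything else is treated as a
    # deposit. The payloads below are illustrative, not live API data:
    #
    #     parse_transaction({'success': True, 'symbol': 'BTC', 'amount': '1.5'})
    #     # -> {'type': 'withdrawal', 'currency': 'BTC', 'status': None, ...}
    #     parse_transaction({'timestamp': 1590492401000, 'symbol': 'ETH', 'amount': '0.249825', 'status': 'completed'})
    #     # -> {'type': 'deposit', 'currency': 'ETH', 'status': 'ok', ...}
    #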
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
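    # A minimal sketch of the signing scheme implemented above, assuming a
    # private GET request with no body, so the payload is the empty string.
    # The pre-image is timestamp + method + versioned url + payload, signed
    # with HMAC-SHA256 using the API secret and sent as a 64-character hex
    # string(apiKey and secret below are placeholders):
    #
    #     import hashlib, hmac, time
    #     timestamp = str(int(time.time() * 1000))
    #     auth = timestamp + 'GET' + '/v2/account' + ''
    #     signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()
    #     headers = {
    #         'BITVAVO-ACCESS-KEY': apiKey,
    #         'BITVAVO-ACCESS-SIGNATURE': signature,
    #         'BITVAVO-ACCESS-TIMESTAMP': timestamp,
    #     }
    #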
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
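    # For example, a response of {"errorCode":216,"error":"You do not have
    # sufficient balance to complete this operation."} matches
    # exceptions['exact']['216'] declared in describe() and is raised as
    # InsufficientFunds, while a response with an unrecognized errorCode
    # falls through to the generic ExchangeError above.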
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
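    # A worked example against the rate limit config in describe(): the
    # private GET ordersOpen endpoint is declared as {'cost': 1, 'noMarket': 25},
    # so fetchOpenOrders() without a symbol consumes a weight of 25 requests,
    # while specifying a market brings the weight down to 1.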
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
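    # A worked reading of the tiered fee table above, assuming(as is usual
    # for tierBased fees) that tiers are keyed by trailing 30-day volume in
    # the quote currency: a trader with a volume of 300,000 falls into the
    # 250000 tier and pays a 0.0016 taker / 0.0008 maker fee, while a fresh
    # account starts at the first tier, 0.0025 taker / 0.0015 maker.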
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
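    # A quick sketch of the two-stage rounding above, assuming a market with
    # pricePrecision 5, so the SIGNIFICANT_DIGITS pass keeps 5 significant
    # digits: a raw price of 8095.3456 is first rounded to 8095.3, and the
    # second pass, truncating to 8 decimal places, leaves it unchanged.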
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
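    # A sketch of the cache behaviour above: the first call stores the raw
    # publicGetAssets response in self.options['fetchCurrencies'] along with
    # a timestamp, and subsequent calls within the expiry window(1000ms by
    # default, per 'options' in describe()) reuse it without another request:
    #
    #     exchange.options['fetchCurrencies']['expires'] = 60000  # cache for a minute
    #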
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
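    # A worked example of the 'end' computation above(see ccxt issue 9227):
    # with timeframe '1h', i.e. a duration of 3600 seconds, an explicit since
    # and no limit, limit defaults to 1440, so the requested window is
    #
    #     end = since + 1440 * 3600 * 1000 = since + 5184000000
    #
    # exactly 1440 hourly candles after the requested start.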
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
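    # A hedged usage sketch: market orders can be sized either in base units
    # via the amount argument, or in quote units by passing a 'cost'(or
    # 'amountQuote') override in params, which the market branch above turns
    # into the amountQuote request field:
    #
    #     await exchange.create_order('ETH/EUR', 'limit', 'buy', 0.25, 180.0)  # buy 0.25 ETH at 180 EUR
    #     await exchange.create_order('ETH/EUR', 'market', 'buy', None, None, {'cost': 50})  # spend 50 EUR
    #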
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
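    # A brief usage sketch: only the fields being changed need to be supplied,
    # e.g. repricing an open limit order, or shrinking it via the
    # bitvavo-specific amountRemaining parameter handled above:
    #
    #     await exchange.edit_order(id, 'ETH/EUR', 'limit', 'sell', None, 215.0)
    #     await exchange.edit_order(id, 'ETH/EUR', 'limit', 'sell', None, None, {'amountRemaining': 0.1})
    #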
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag for the withdrawal, sent to the exchange as the 'paymentId' parameter
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
#         "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
#         "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
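# withdraw responses carry 'success' and fetchWithdrawals entries carry 'address', while deposit entries carry neither, hence the type inference below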
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
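# per Bitvavo's REST auth scheme, the string to sign is timestamp + method + url + body(the body stays empty for GET and DELETE requests)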
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
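# try broad, message-based matches first, then exact errorCode matches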
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
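# endpoints such as ticker/24h and ordersOpen are cheaper when scoped to a single market, so 'noMarket' holds the weighted cost of an unscoped call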
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
|
create_order
|
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of the currency you want to trade, in units of the base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, TRUNCATE, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
#         taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
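# derive an explicit end so the requested window covers exactly 'limit' candles: end = since + limit * candle duration in ms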
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
# MASKED: create_order function (lines 903-997)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
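"""
edit a trade order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float|None amount: how much of the currency you want to trade, in units of the base currency
:param float|None price: the price at which the order is to be fulfilled, in units of the quote currency
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""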
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(request)  # params were already merged into the request above
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
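"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""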
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
#         "disableMarketProtection":false,
#         "timeInForce": "GTC",
#         "postOnly": true,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag for the withdrawal, sent to the exchange as the 'paymentId' parameter
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
#         "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
#         "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
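# withdraw responses carry 'success' and fetchWithdrawals entries carry 'address', while deposit entries carry neither, hence the type inference below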
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
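# per Bitvavo's REST auth scheme, the string to sign is timestamp + method + url + body(the body stays empty for GET and DELETE requests)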
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
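# Signing sketch: the preimage is timestamp + method + versioned path (plus the
# query string for GET/DELETE, or the JSON body otherwise), HMAC-SHA256'd with
# the API secret into a hex digest. A hand-rolled equivalent, assuming
# placeholder values, would be:
#
#     import hashlib, hmac, time
#     timestamp = str(int(time.time() * 1000))
#     preimage = timestamp + 'GET' + '/v2/account'  # empty payload for GET
#     signature = hmac.new(b'YOUR_SECRET', preimage.encode(), hashlib.sha256).hexdigest()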
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
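# Cost lookup sketch: endpoints declare their weights in the 'api' config, e.g.
# 'ticker/24h': {'cost': 1, 'noMarket': 25}, so a call without a 'market'
# parameter is billed the heavier 'noMarket' weight:
#
#     calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {}, {'cost': 1, 'noMarket': 25})  # 25
#     calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {'market': 'BTC-EUR'}, {'cost': 1, 'noMarket': 25})  # 1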
|
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
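# A hedged usage sketch for the method above (symbols and amounts are
# assumptions, not taken from the exchange):
#
#     # limit order: price is required and both values are snapped to precision
#     order = await exchange.create_order('ETH/EUR', 'limit', 'buy', 0.25, 180.0)
#     # market buy by cost: pass the quote amount via params instead of price
#     order = await exchange.create_order('ETH/EUR', 'market', 'buy', None, None, {'cost': 50.0})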
| 903
| 997
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
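# Worked example, assuming a market with a price precision of 5 significant
# digits and an amount precision of 8 decimal places:
#
#     price_to_precision('BTC/EUR', 8097.38)       # '8097.4' - rounded to 5 significant digits, then truncated to 8 decimals
#     amount_to_precision('BTC/EUR', 0.123456789)  # '0.12345678' - truncated to 8 decimal places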
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
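# Cache sketch: the raw publicGetAssets response is memoized in
# self.options['fetchCurrencies'] together with a fetch timestamp and is only
# refetched once the configured 'expires' window (1000 ms by default) has
# elapsed:
#
#     currencies = await exchange.fetch_currencies_from_cache()  # hits the API
#     currencies = await exchange.fetch_currencies_from_cache()  # served from the cache within 1 second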
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
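# A hedged usage sketch (symbol and depth are assumptions):
#
#     orderbook = await exchange.fetch_order_book('BTC/EUR', 25)
#     best_bid, best_ask = orderbook['bids'][0], orderbook['asks'][0]
#     # orderbook['nonce'] carries the exchange-side sequence number shown above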
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
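# Worked example of the 'end' arithmetic above, with assumed values: for the
# '1h' timeframe parse_timeframe returns 3600 seconds, so with
# since=1590000000000 and the default limit of 1440 the request window closes
# at 1590000000000 + 1440 * 3600 * 1000 = 1595184000000, i.e. exactly 1440
# hourly candles after 'since' (see https://github.com/ccxt/ccxt/issues/9227).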
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
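# A hedged stop-order sketch for the method above (values are assumptions):
# stop orders take their trigger via the 'stopPrice' param, which is mapped to
# the exchange's 'triggerAmount'/'triggerType' fields:
#
#     order = await exchange.create_order('ETH/EUR', 'stopLossLimit', 'sell', 0.25, 175.0, {'stopPrice': 180.0})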
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
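"""
edit an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param str type: not used by bitvavo editOrder
:param str side: not used by bitvavo editOrder
:param float|None amount: the new amount of currency to trade, in units of the base currency
:param float|None price: the new price at which the order is to be fulfilled, in units of the quote currency
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""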
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
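# A hedged usage sketch (the order id and price are assumptions); at least one
# of price, amount, amountRemaining or a non-empty params dict must be given:
#
#     updated = await exchange.edit_order('af76d6ce-9f7c-4006-b715-bb5d430652d0', 'ETH/EUR', 'limit', 'sell', None, 185.0)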
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
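"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""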
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional payment id attached to the withdrawal, sent to the exchange as 'paymentId' when set
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
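# A hedged usage sketch (address, amount, asset and tag are placeholder
# assumptions):
#
#     tx = await exchange.withdraw('BTC', 1.5, 'SOME_BTC_ADDRESS')
#     # for assets that need one, pass the payment id as the tag argument:
#     tx = await exchange.withdraw('XRP', 10, 'SOME_XRP_ADDRESS', '10002653')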
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
|
cancel_order
|
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100,000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
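# note: in the call above, the literal 0 is ccxt's TRUNCATE rounding mode, and
# the currency's precision value is used as the number of decimal places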
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
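# a minimal sketch of what the two-step conversion above produces, assuming
# the decimal_to_precision semantics imported at the top of the file; the
# numbers are illustrative only:
#
# from ccxt.base.decimal_to_precision import decimal_to_precision, ROUND, TRUNCATE, DECIMAL_PLACES, SIGNIFICANT_DIGITS
# rounded = decimal_to_precision('123.456789', ROUND, 5, SIGNIFICANT_DIGITS)  # '123.46', rounded to 5 significant digits
# final = decimal_to_precision(rounded, TRUNCATE, 8, DECIMAL_PLACES)  # '123.46', already within 8 decimals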
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical summary calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical summaries calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
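# usage sketch, assuming an authenticated instance named 'exchange' and an
# illustrative 'BTC/EUR' market:
#
# fees = await exchange.fetch_trading_fees()
# print(fees['BTC/EUR']['maker'], fees['BTC/EUR']['taker'])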
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid (buy) and ask (sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
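# worked example of the line above: timeframe '1h', since = 1590000000000,
# limit = 100, duration = 3600 seconds, so
# end = 1590000000000 + 100 * 3600 * 1000 = 1590360000000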
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
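# usage sketch, assuming an authenticated instance named 'exchange':
#
# depositAddress = await exchange.fetch_deposit_address('BTC')
# print(depositAddress['address'], depositAddress['tag'])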
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
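# usage sketch for a stop-limit order, assuming an authenticated instance
# named 'exchange'; the symbol, amount and prices are illustrative only:
#
# order = await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.01, 25000, {'stopPrice': 25500})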
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
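# usage sketch: update the price and the unfilled remainder of an order,
# assuming an authenticated instance named 'exchange'; the id, symbol and
# values are illustrative only:
#
# updated = await exchange.edit_order('af76d6ce-9f7c-4006-b715-bb5d430652d0', 'ETH/EUR', 'limit', 'sell', None, 190.0, {'amountRemaining': 0.1})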
# MASKED: cancel_order function (lines 1020-1042)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag/memo for the withdrawal, sent to the exchange as the paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
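# usage sketch, assuming an authenticated instance named 'exchange' and an
# illustrative destination address:
#
# tx = await exchange.withdraw('BTC', 0.01, '1ExampleBitcoinAddressXXXXXXXXXXXX')
# print(tx['info'])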
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
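# the inference above follows the payloads documented earlier: withdraw
# responses carry 'success', fetchWithdrawals entries carry 'address', while
# fetchDeposits entries carry neither, so they fall through to 'deposit'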
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
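# a minimal standalone sketch of the signature computed above, assuming a
# hypothetical secret and a GET request to /v2/account with no body:
#
# import hashlib
# import hmac as hmac_module
# timestamp = '1590490000000'  # milliseconds, illustrative
# auth = timestamp + 'GET' + '/v2/account' + ''  # payload is empty for GET/DELETE
# signature = hmac_module.new(b'HYPOTHETICAL_SECRET', auth.encode(), hashlib.sha256).hexdigest()
# assert len(signature) == 64  # a 64-character hexadecimal string, see errorCode 308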
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
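# example: GET /v2/ticker/24h is configured as {'cost': 1, 'noMarket': 25}, so
# fetching all 24h tickers without a 'market' param costs 25 weighted units,
# while a single-market request costs 1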
|
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
| 1,020
| 1,042
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
                    '403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
                    '409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
                    '411': BadRequest, # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
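    # Editor's sketch(not part of the generated API): one way to resolve the
    # tiered taker/maker fee for a given 30-day volume from the 'tiers' table
    # declared in describe() above, assuming self.fees mirrors that table.
    # The method name is hypothetical.
    def _example_fee_for_volume(self, takerOrMaker, volume):
        tiers = self.fees['trading']['tiers'][takerOrMaker]
        rate = tiers[0][1]  # the base tier applies below the first threshold
        for threshold, tierRate in tiers:
            if volume >= threshold:
                rate = tierRate  # thresholds ascend, so the last match wins
        return rate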
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
    def amount_to_precision(self, symbol, amount):
        # note: the docs link below is to bitfinex and was carried over in the
        # generated code; the same truncation mechanics apply here
        # https://docs.bitfinex.com/docs/introduction#amount-precision
        # The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
        return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
    def price_to_precision(self, symbol, price):
        price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # note: the docs link below is to bitfinex and was carried over in the
        # generated code; prices are rounded to significant digits first, then
        # truncated to at most 8 decimals
        # https://docs.bitfinex.com/docs/introduction#price-precision
        # The precision level of all trading prices is based on significant figures.
        # All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
        # Prices submitted with a precision larger than 5 will be cut by the API.
        return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
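    # Editor's sketch of the two-step handling above: with precisionMode set
    # to SIGNIFICANT_DIGITS a price is rounded to the market's significant
    # digits first, then truncated to at most 8 decimals, while amounts are
    # truncated to decimal places. Symbol and values are illustrative and
    # assume load_markets() has already run.
    def _example_precisions(self, symbol='BTC/EUR'):
        amount = self.amount_to_precision(symbol, 0.123456789)  # truncated
        price = self.price_to_precision(symbol, 27123.456789)  # rounded, then truncated
        return amount, price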
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
        # currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
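    # Editor's sketch: the cache above lives in self.options['fetchCurrencies']
    # and is refreshed once 'expires' milliseconds have passed; resetting the
    # entry drops the cached timestamp and forces a refetch on the next call.
    # This helper is hypothetical, shown only to illustrate the expiry logic.
    async def _example_refresh_currencies(self, params={}):
        self.options['fetchCurrencies'] = {'expires': 0}
        return await self.fetch_currencies_from_cache(params)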
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, with statistics calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
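    # Editor's worked example for the windowing above(see ccxt issue #9227):
    # with timeframe '1h'(3600 seconds per candle) and limit 24, the implied
    # 'end' is since + 24 * 3600 * 1000 ms, exactly one day after 'since'.
    def _example_ohlcv_end(self, since, timeframe='1h', limit=24):
        duration = self.parse_timeframe(timeframe)  # candle length in seconds
        return self.sum(since, limit * duration * 1000)  # end timestamp in ms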
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
            # 'disableMarketProtection': False, # when set to True the order is not cancelled if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
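    # Editor's usage sketch for the branching in create_order above; the
    # symbol and numbers are illustrative. A market buy can be sized in quote
    # currency via the 'cost'(or 'amountQuote') param, while a stopLossLimit
    # order needs both a price argument and a 'stopPrice' param.
    async def _example_create_orders(self):
        marketBuy = await self.create_order('BTC/EUR', 'market', 'buy', None, None, {'cost': 100})
        stopLimit = await self.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.01, 25000, {'stopPrice': 26000})
        return [marketBuy, stopLimit]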
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
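    # Editor's usage sketch: edit_order above requires at least one of the
    # price argument, the amount argument or the 'amountRemaining' param,
    # otherwise ArgumentsRequired is raised. Values are illustrative.
    async def _example_reprice_order(self, id, symbol, newPrice):
        return await self.edit_order(id, symbol, 'limit', 'buy', None, newPrice)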
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: the order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
        #         "disableMarketProtection":false,
        #         "timeInForce": "GTC",
        #         "postOnly": true,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
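    # Editor's sketch: the 'type' above is inferred from response shape,
    # because one parser serves three endpoints -- in the documented samples
    # withdraw() responses carry 'success', fetchWithdrawals rows carry
    # 'address', and deposit-history rows carry neither. Restated minimally:
    def _example_transaction_type(self, transaction):
        isWithdrawal = ('success' in transaction) or ('address' in transaction)
        return 'withdrawal' if isWithdrawal else 'deposit'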
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
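    # Editor's sketch of the signing scheme above, restated with the standard
    # library so it can be verified independently: the signature is
    # HMAC-SHA256 over timestamp + method + url + body, hex-encoded. All
    # inputs here are placeholders.
    def _example_signature(self, timestamp, method, url, payload, secret):
        import hashlib
        import hmac as stdlib_hmac
        auth = timestamp + method + url + payload
        return stdlib_hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()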
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
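    # Editor's sketch of the matching order above: the broad(message
    # substring) table is consulted before the exact errorCode table because
    # several distinct problems share one code(e.g. 205) and only the message
    # text tells them apart. A simplified classifier over the same tables:
    def _example_classify_error(self, errorCode, errorMessage):
        for key in self.exceptions['broad']:
            if key in errorMessage:
                return self.exceptions['broad'][key]
        return self.exceptions['exact'].get(errorCode, ExchangeError)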
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
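# Editor's sketch(module level, hypothetical helper): how the cost rule above
# plays out for the 'ticker/24h' endpoint, declared as
# {'cost': 1, 'noMarket': 25} -- a request without a 'market' param weighs 25,
# while a single-market request weighs 1.
def _example_ticker_24h_costs(exchange):
    config = {'cost': 1, 'noMarket': 25}
    allMarkets = exchange.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {}, config)
    oneMarket = exchange.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {'market': 'BTC-EUR'}, config)
    return allMarkets, oneMarket  # (25, 1)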
|
cancel_all_orders
|
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
        # Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
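    # Usage sketch (illustrative, not part of the generated file): fetching the server
    # clock with the public endpoint above and measuring drift against the local clock.
    # No credentials are needed; assumes ccxt is installed.
    #
    #   import asyncio
    #   import ccxt.async_support as ccxt
    #
    #   async def show_server_time():
    #       exchange = ccxt.bitvavo()
    #       try:
    #           server_ms = await exchange.fetch_time()
    #           print('server:', server_ms, 'local drift(ms):', exchange.milliseconds() - server_ms)
    #       finally:
    #           await exchange.close()
    #
    #   asyncio.run(show_server_time())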
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
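    # Usage sketch (illustrative): load_markets() wraps fetch_markets() and caches the
    # unified structures built above on exchange.markets, keyed by symbol.
    #
    #   import asyncio
    #   import ccxt.async_support as ccxt
    #
    #   async def list_markets():
    #       exchange = ccxt.bitvavo()
    #       try:
    #           markets = await exchange.load_markets()
    #           for symbol, market in markets.items():
    #               print(symbol, 'active:', market['active'], 'min base amount:', market['limits']['amount']['min'])
    #       finally:
    #           await exchange.close()
    #
    #   asyncio.run(list_markets())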
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
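    # Usage sketch (illustrative, 'ADA' is just an example code; assumes an exchange
    # instance inside an async function as in the earlier sketches): checking whether
    # deposits and withdrawals are currently enabled, using the 'active' flag derived
    # above from depositStatus/withdrawalStatus.
    #
    #   currencies = await exchange.fetch_currencies()
    #   ada = currencies['ADA']
    #   print(ada['active'], ada['deposit'], ada['withdraw'], 'withdrawal fee:', ada['fee'])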
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
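    # Usage sketch (illustrative, 'BTC/EUR' is an example symbol; assumes an exchange
    # instance inside an async function as in the earlier sketches):
    #
    #   ticker = await exchange.fetch_ticker('BTC/EUR')
    #   print(ticker['bid'], ticker['ask'], ticker['last'], ticker['baseVolume'])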
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
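    # Usage sketch (illustrative): one call without a market param costs 25 rate-limit
    # units (see the 'ticker/24h' entry in the api config above) and returns every
    # market; the symbols argument only filters the parsed result client-side.
    #
    #   tickers = await exchange.fetch_tickers(['BTC/EUR', 'ETH/EUR'])
    #   for symbol, ticker in tickers.items():
    #       print(symbol, ticker['last'])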
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
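    # Usage sketch (illustrative): the most recent public trades for one market,
    # using the optional limit handled above (default 500, max 1000).
    #
    #   trades = await exchange.fetch_trades('BTC/EUR', limit=10)
    #   for trade in trades:
    #       print(trade['datetime'], trade['side'], trade['price'], trade['amount'])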
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
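    # Usage sketch (illustrative, requires apiKey/secret): the endpoint above returns a
    # single account-level fee tier, so every symbol gets the same maker/taker rates.
    #
    #   fees = await exchange.fetch_trading_fees()
    #   print(fees['BTC/EUR']['maker'], fees['BTC/EUR']['taker'])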
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
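    # Usage sketch (illustrative): best bid/ask from the parsed book; the 'nonce' added
    # above can be used to sequence REST snapshots against websocket deltas.
    #
    #   book = await exchange.fetch_order_book('BTC/EUR', 25)
    #   best_bid_price, best_bid_amount = book['bids'][0]
    #   best_ask_price, best_ask_amount = book['asks'][0]
    #   print(best_bid_price, best_ask_price, book['nonce'])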
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
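    # Usage sketch (illustrative): when 'since' is given, the code above derives the
    # request window as end = since + limit * duration * 1000, so 24 hourly candles
    # span exactly one day. Assumes an exchange instance inside an async function.
    #
    #   since = exchange.milliseconds() - 24 * 60 * 60 * 1000
    #   candles = await exchange.fetch_ohlcv('BTC/EUR', '1h', since, 24)
    #   for timestamp, open_, high, low, close, volume in candles:
    #       print(exchange.iso8601(timestamp), close, volume)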
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
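    # Usage sketch (illustrative, placeholder credentials; assumes the account holds
    # BTC): private endpoints require an apiKey/secret pair at construction time.
    #
    #   import asyncio
    #   import ccxt.async_support as ccxt
    #
    #   async def show_balance():
    #       exchange = ccxt.bitvavo({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    #       try:
    #           balance = await exchange.fetch_balance()
    #           print('free:', balance['BTC']['free'], 'in orders:', balance['BTC']['used'])
    #       finally:
    #           await exchange.close()
    #
    #   asyncio.run(show_balance())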
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
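    # Usage sketch (illustrative, placeholder amounts and prices; assumes an
    # authenticated exchange inside an async function, as in the fetch_balance sketch):
    # a plain limit order, and a stopLossLimit order which takes the 'stopPrice' param
    # handled above.
    #
    #   order = await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.001, 25000)
    #   stop = await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.001, 24000, {'stopPrice': 24100})
    #   print(order['id'], order['status'])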
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
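    # Usage sketch (illustrative, continues the create_order sketch above): cancelling
    # by id; the symbol argument is mandatory because the endpoint above needs the
    # market id in the request.
    #
    #   canceled = await exchange.cancel_order(order['id'], 'BTC/EUR')
    #   print(canceled['id'])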
# MASKED: cancel_all_orders function (lines 1044-1065)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
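    # Usage sketch (illustrative, placeholder address): per the request comments above,
    # the address may also be an IBAN for EUR withdrawals. Assumes an authenticated
    # exchange inside an async function.
    #
    #   tx = await exchange.withdraw('BTC', 0.1, 'YOUR_BTC_ADDRESS')
    #   print(tx['info'])  # e.g. {"success": True, "symbol": "BTC", "amount": "0.1"}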
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
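    # Standalone sketch of the signing scheme implemented above (illustrative; the
    # secret and path are placeholders): the signature is the hex HMAC-SHA256 of
    # timestamp + method + url + body, sent alongside the key and access window in
    # the BITVAVO-ACCESS-* headers.
    #
    #   import hmac
    #   import hashlib
    #   import time
    #
    #   def sign_request(secret, method, url, body=''):
    #       timestamp = str(int(time.time() * 1000))
    #       auth = timestamp + method + url + body
    #       signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()
    #       return {'BITVAVO-ACCESS-SIGNATURE': signature, 'BITVAVO-ACCESS-TIMESTAMP': timestamp}
    #
    #   headers = sign_request('YOUR_SECRET', 'GET', '/v2/account')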
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
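    # Worked example of the mapping above: a body like
    # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
    # is matched first against the 'broad' substrings of the error text, then against
    # the 'exact' errorCode table, so code 216 surfaces as InsufficientFunds. Assumes
    # an authenticated exchange inside an async function (illustrative):
    #
    #   try:
    #       await exchange.create_order('BTC/EUR', 'limit', 'buy', 1000, 1.0)
    #   except ccxt.InsufficientFunds as e:
    #       print('mapped from errorCode 216:', e)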
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
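    # Worked example of the cost rule above, using the 'ticker/24h' config
    # {'cost': 1, 'noMarket': 25}: omitting the 'market' param costs 25 units
    # (the full-book response), while passing one costs 1.
    #
    #   config = {'cost': 1, 'noMarket': 25}
    #   self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {}, config)                      # 25
    #   self.calculate_rate_limiter_cost('public', 'GET', 'ticker/24h', {'market': 'BTC-EUR'}, config)   # 1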
|
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
| 1,044
| 1,065
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError, # Deposits for this asset are not available at this time.
                    '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for self asset are not available at self time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
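# note: the literal rounding mode 0 passed above equals ccxt's TRUNCATE
# constant(ccxt.base.decimal_to_precision defines TRUNCATE = 0), so fees
# are truncated to the currency precision rather than rounded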
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
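# an illustrative trace of the two-step conversion above(values assumed):
#
#     raw price 8097.4567 on a market with pricePrecision 5
#     step 1: ROUND to 5 significant digits   -> '8097.5'
#     step 2: TRUNCATE to at most 8 decimals  -> '8097.5'(unchanged)
#
# the second pass only matters for very small prices, where rounding to
# significant digits alone could produce more than 8 decimal places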
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
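# a sketch of the cache check above with assumed numbers:
#
#     options = {'timestamp': 1590379518000, 'expires': 1000}
#     now = 1590379519148  ->  now - timestamp = 1148 > 1000
#
# the cached response is considered stale, so publicGetAssets is called
# again; with the default 'expires' of 1000ms the asset list is refetched
# at most once per second regardless of how often the method is called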
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
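# a worked example of the start/end window computed above(illustrative):
#
#     timeframe '1m'  ->  duration = 60 seconds, default limit = 1440
#     end = since + 1440 * 60 * 1000 = since + 86400000 ms(one day)
#
# so a request with the default limit spans exactly 24 hours of one-minute
# candles, matching the endpoint maximum of 1440 candles per call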
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
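# a minimal usage sketch of the branches above(hypothetical values, the
# 'exchange' instance is assumed to be a configured ccxt.bitvavo object):
#
#     # limit order: both price and amount are converted to market precision
#     await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.1, 8000.0)
#
#     # market order by quote cost: params['cost'] is sent as 'amountQuote'
#     await exchange.create_order('BTC/EUR', 'market', 'buy', None, None, {'cost': 100})
#
#     # stop-loss limit: 'stopPrice' is required and sent as 'triggerAmount'
#     await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.1, 7500.0, {'stopPrice': 7600.0})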
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
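# a brief usage sketch(illustrative): bitvavo updates orders in place, so
# an open limit order can be repriced without cancelling it first:
#
#     await exchange.edit_order(order_id, 'BTC/EUR', 'limit', 'buy', None, 7900.0)
#
# alternatively pass {'amountRemaining': ...} in params to resize only the
# unfilled part; sending neither amount, price nor extra params raises
# ArgumentsRequired, mirroring exchange error 232 above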
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag for the withdrawal, sent to the exchange as the paymentId field
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
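# note on the type detection above: the withdraw() response contains a
# 'success' field and fetchWithdrawals() rows contain an 'address' field,
# while the fetchDeposits() sample carries neither, so checking for those
# keys is used as a proxy for the transaction direction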
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
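# an illustrative trace of the signing scheme above(fake credentials):
#
#     timestamp = '1590379519148'
#     method = 'GET'
#     url = '/v2/account'
#     payload = ''  # empty for GET/DELETE requests
#     auth = '1590379519148GET/v2/account'
#     signature = HMAC-SHA256(secret, auth)  # 64-char hexadecimal digest
#
# the digest is sent in BITVAVO-ACCESS-SIGNATURE alongside the api key,
# timestamp and access window, consistent with error 308's length check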
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
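# a worked example of the cost lookup above, using the config from
# describe() below: publicGetTicker24h is declared with
# {'cost': 1, 'noMarket': 25}, so fetching all tickers(no 'market' in
# params) costs 25 weighted requests, while a single-market call costs 1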
|
fetch_order
|
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
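# note: the numbers above are rate-limit weights, not prices; combined
# with 'rateLimit': 60 above they model the 1000 weighted requests per
# minute budget, e.g. a weight-5 call such as privateGetTrades consumes
# as much of the budget as five weight-1 calls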
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
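# an illustrative read of the fee tiers above(tier semantics assumed to be
# [30-day volume threshold in EUR, fee rate]): a trader with 300000 EUR of
# volume falls into the 250000 tier and pays 0.0016(0.16%) taker and
# 0.0008(0.08%) maker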
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
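    # cache behavior sketch(illustrative): the first call hits publicGetAssets and stores
    # {'response': ..., 'timestamp': now} under self.options['fetchCurrencies']; any call
    # arriving within the next `expires` milliseconds(1000 by default here) reuses the
    # stored response instead of issuing another request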
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested symbol
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
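        # a worked example of the windowing above(illustrative numbers):
        # timeframe '1h' -> duration == 3600 seconds, so with since == 1590383700000
        # and limit == 24, request['end'] == 1590383700000 + 24 * 3600 * 1000
        # == 1590470100000, i.e. the end of a 24-candle window starting at `since`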
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
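        # an illustrative(hypothetical) stopLossLimit request assembled by the branch above:
        # createOrder('ETH/EUR', 'stopLossLimit', 'sell', 0.25, 180.0, {'stopPrice': 185.0})
        # -> {'market': 'ETH-EUR', 'side': 'sell', 'orderType': 'stopLossLimit',
        #     'price': '180', 'triggerAmount': '185', 'triggerType': 'price', 'amount': '0.25'}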
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
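    # an illustrative(hypothetical) partial edit that updates only the remaining amount:
    # await exchange.edit_order(id, 'ETH/EUR', 'limit', 'sell', params={'amountRemaining': 0.1})
    # sends PUT /v2/order with {'amountRemaining': '0.1', 'orderId': id, 'market': 'ETH-EUR'}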
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
# MASKED: fetch_order function (lines 1067-1117)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional tag for the withdrawal, sent to the API as the paymentId field
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
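    # an illustrative signature computation(example values, not real credentials):
    # for a private GET /v2/balance with no query string at timestamp 1590379519148,
    #   auth = '1590379519148' + 'GET' + '/v2/balance' + ''
    #   signature = hex(HMAC-SHA256(secret, auth))
    # and the result travels in the BITVAVO-ACCESS-SIGNATURE header alongside
    # BITVAVO-ACCESS-KEY, BITVAVO-ACCESS-TIMESTAMP and BITVAVO-ACCESS-WINDOW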
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
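    # e.g. with the 'ticker/24h' config {'cost': 1, 'noMarket': 25} from describe(),
    # fetch_tickers() without a 'market' in params is weighted 25 requests,
    # while fetch_ticker('ETH/BTC'), which sets 'market', is weighted 1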
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: the order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for self asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete self operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return self error."}
'300': AuthenticationError, # Authentication is required for self endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from self IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for self.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for self asset are not available at self time.
'402': PermissionDenied, # You need to verify your identitiy before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete self operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete self operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for self asset are not available at self time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding self will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# self method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
        :param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
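    # a usage sketch(illustrative values): 'since' maps to the 'start' request
    # parameter and 'limit' is passed through, with the API allowing at most 1000:
    #
    #     since = exchange.parse8601('2020-05-25T00:00:00Z')
    #     trades = await exchange.fetch_trades('BTC/EUR', since, 1000)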
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
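    # a usage sketch(illustrative): 'limit' maps to the 'depth' request
    # parameter, and the returned 'nonce' can be compared against websocket
    # book updates to discard stale deltas:
    #
    #     orderbook = await exchange.fetch_order_book('BTC/EUR', 25)
    #     best_bid, best_ask = orderbook['bids'][0], orderbook['asks'][0]  # [price, amount] pairs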
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
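    # a worked example of the windowing above(illustrative numbers): with
    # timeframe='1h' and limit=24, parse_timeframe returns 3600 seconds, so
    # request['end'] = since + 24 * 3600 * 1000, i.e. exactly one day of
    # candles starting at 'since', see https://github.com/ccxt/ccxt/issues/9227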
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
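    # a usage sketch(illustrative values): stop orders take their trigger
    # through the unified 'stopPrice'(or 'triggerAmount') param, which the
    # code above converts to the exchange-native 'triggerAmount'/'triggerType':
    #
    #     order = await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell',
    #                                         0.01, 28000, {'stopPrice': 28500})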
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
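    # a usage sketch(illustrative values, order_id is a hypothetical variable):
    # besides price and amount, the unfilled quantity can be changed through
    # the 'amountRemaining' param handled above:
    #
    #     order = await exchange.edit_order(order_id, 'ETH/EUR', 'limit', 'sell',
    #                                       None, 195.0, {'amountRemaining': '0.1'})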
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
        fetches information on an order made by the user
        :param str id: the order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional destination tag, sent to the exchange as 'paymentId'
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
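    # a usage sketch(illustrative values, the address is a placeholder):
    # 'address' also accepts an IBAN for EUR, and a destination tag is sent
    # to the exchange as 'paymentId':
    #
    #     tx = await exchange.withdraw('XRP', 25, 'rEXAMPLEADDRESS', '10002653',
    #                                  {'addWithdrawalFee': True})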
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
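    # a signing sketch(illustrative values, 'secret' is a placeholder): the
    # HMAC-SHA256 preimage is the concatenation timestamp + method + url + body,
    # signed with the API secret as the key and hex-encoded:
    #
    #     preimage = '1590000000000' + 'GET' + '/v2/account' + ''
    #     signature = hmac.new(secret.encode(), preimage.encode(), hashlib.sha256).hexdigest()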
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
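    # a worked example(values from the api description): 'ticker/24h' is
    # configured as {'cost': 1, 'noMarket': 25}, so fetching all tickers
    # without a 'market' param is weighted 25, while a single market costs 1;
    # with rateLimit=60 ms that allows 1000 weighted requests per minute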
|
fetch_open_orders
|
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded,  # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded,  # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder,  # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds,  # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound,  # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError,  # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError,  # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied,  # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest,  # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError,  # Deposits for this asset are not available at this time.
                    '402': PermissionDenied,  # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete self operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds,  # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError,  # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
        return self.decimal_to_precision(fee, TRUNCATE, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
        # amounts are truncated(not rounded) to the market's amount precision,
        # which is expressed in decimal places and derived from the base
        # asset's "decimals" field
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # the precision level of trading prices is based on significant figures:
        # prices are first rounded to the market's "pricePrecision" significant
        # digits and then truncated to a maximum of 8 decimal places
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
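    # a worked example(illustrative numbers): with pricePrecision=5 and
    # precisionMode=SIGNIFICANT_DIGITS, a price of 1234.56789 is first rounded
    # to 5 significant digits('1234.6') and then truncated to at most
    # 8 decimal places, so the submitted value is '1234.6'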
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
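    # a worked example(values from the response example above): "ADA-BTC"
    # becomes the unified symbol 'ADA/BTC', price precision comes from
    # "pricePrecision"(5 significant digits under SIGNIFICANT_DIGITS mode),
    # and amount precision is taken from the ADA currency's "decimals" field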
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
        # currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
        :param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
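# Worked example(illustrative comment, not part of the generated file): with
# since=1590383520000, timeframe='1m'(duration=60 seconds) and limit=None,
# limit defaults to 1440 and the requested window becomes
#     end = 1590383520000 + 1440 * 60 * 1000 = 1590469920000
# i.e. exactly one day of one-minute candles starting at `since`.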
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
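# Note(illustrative comment): only 'free'(available) and 'used'(inOrder) are
# filled in per currency above; safe_balance derives each account's 'total'
# as free + used.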
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of the currency you want to trade, in units of the base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
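# Request sketch(illustrative comment, values hypothetical) of what the
# branch above assembles for a stopLossLimit sell order:
#     {"market":"BTC-EUR","side":"sell","orderType":"stopLossLimit",
#      "amount":"0.1","price":"25000","triggerAmount":"25500","triggerType":"price"}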
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
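# Usage sketch(illustrative comment, values hypothetical): lower the price of
# an open limit order and shrink the unfilled remainder in one call:
#     await exchange.edit_order(id, 'BTC/EUR', 'limit', 'buy', price=25000, params={'amountRemaining': 0.5})
# At least one of amount, price or a non-empty params is required, otherwise
# ArgumentsRequired is raised as above.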
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
# MASKED: fetch_open_orders function (lines 1175-1229)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
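# Worked example(illustrative comment): parse_order_status('partiallyFilled')
# returns 'open' and parse_order_status('filled') returns 'closed'; statuses
# missing from the map are passed through unchanged because the lookup falls
# back to the key itself.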
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag or memo for the withdrawal, sent to the exchange as paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
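# Signature sketch(illustrative comment, values hypothetical) of the scheme
# implemented in sign() above, using only the standard library:
#     import hashlib
#     import hmac
#     timestamp = '1590505649245'
#     auth = timestamp + 'GET' + '/v2/account' + ''  # timestamp + method + url + payload
#     signature = hmac.new(b'SECRET', auth.encode(), hashlib.sha256).hexdigest()
# The result is the 64-character hexadecimal string sent as BITVAVO-ACCESS-SIGNATURE.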
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
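# Worked example(illustrative comment): a body like
#     {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
# yields errorCode '216', which matches self.exceptions['exact'] and raises
# InsufficientFunds; codes with no exact or broad match fall through to the
# generic ExchangeError above.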
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
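# Worked example(illustrative comment): privateGetOrdersOpen is configured as
# {'cost': 1, 'noMarket': 25}, so calling it without a 'market' param costs 25
# rate-limit units, while scoping it to a single market costs only 1.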
|
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
| 1175
| 1229
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
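# Worked example(illustrative comment) of the tiered fee table above: a 30-day
# volume of 300000 EUR falls into the 250000 tier, i.e. a 0.0016 taker and
# 0.0008 maker fee; the first tier(volume below 100000) charges 0.0025 / 0.0015.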
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
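# Worked example(illustrative comment), assuming a market whose amount
# precision is 8 decimals:
#     amount_to_precision('BTC/EUR', 0.123456789)  # -> '0.12345678'
# TRUNCATE cuts off(rather than rounds) anything beyond the allowed decimals.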
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submitted with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
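# Parsing sketch(illustrative comment): the "ADA-BTC" entry above becomes a
# unified market with id 'ADA-BTC', symbol 'ADA/BTC', price precision 5,
# amount precision taken from the ADA currency 'decimals'(default 8), and
# active=True only while status == 'trading'.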
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
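# Caching sketch(illustrative comment): with the default option
# {'fetchCurrencies': {'expires': 1000}}, two calls within one second reuse
# the cached publicGetAssets response; once 1000ms have passed the assets
# endpoint is queried again and the cached timestamp refreshed.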
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
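    # Worked example of the windowing arithmetic above(a sketch, not part of
    # the generated file): for timeframe '1h' parse_timeframe returns 3600
    # seconds, so with since=1590383700000 and limit=24 the requested window is
    #
    #     end = since + limit * duration * 1000
    #         = 1590383700000 + 24 * 3600 * 1000
    #         = 1590470100000  # exactly 24 hourly candles after `since`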
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
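    # Hypothetical usage sketch, assuming a configured client `exchange` and
    # illustrative prices: a stopLossLimit order needs both a limit price
    # (positional) and a trigger supplied via params['stopPrice'], which the
    # branch above converts into 'triggerAmount'/'triggerType':
    #
    #     order = await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell',
    #                                         0.01, 25000.0, {'stopPrice': 26000.0})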
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
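    # Hypothetical usage sketch with a placeholder `order_id`: besides price
    # and amount, a partially filled order can be resized through the
    # exchange-specific 'amountRemaining' parameter extracted from params above:
    #
    #     order = await exchange.edit_order(order_id, 'BTC/EUR', 'limit', 'sell',
    #                                       None, 26500.0, {'amountRemaining': 0.005})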
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
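    # Hypothetical pagination sketch with a placeholder trade id: 'start'
    # (from since) can be combined with the exchange-specific
    # 'tradeIdFrom'/'tradeIdTo' parameters shown in the request template above:
    #
    #     trades = await exchange.fetch_my_trades('ETH/EUR', limit=1000,
    #                                             params={'tradeIdFrom': last_seen_trade_id})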
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
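    # Note on the type inference above: in the documented samples, withdraw()
    # responses carry a 'success' field and fetchWithdrawals entries carry an
    # 'address' field, while fetchDeposits entries carry neither, so the
    # presence test classifies the record:
    #
    #     {"success": true, "symbol": "BTC", "amount": "1.5"}  # -> 'withdrawal'
    #     {"timestamp": 1590492401000, "symbol": "ETH", ...}   # -> 'deposit'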
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
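    # An equivalent standalone sketch of the signature scheme implemented
    # above(for illustration only, with a placeholder `secret`): the auth
    # string timestamp + method + url + body is HMAC-SHA256-signed and
    # hex-encoded, matching the 64-character requirement of error code 308:
    #
    #     import hashlib, hmac, time
    #     timestamp = str(int(time.time() * 1000))
    #     auth = timestamp + 'GET' + '/v2/account' + ''
    #     signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()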
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
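    # Worked example of the matching order above: for the documented response
    # {"errorCode":205,"error":"symbol parameter is invalid."}, the broad match
    # on the error text raises BadSymbol first, before the exact match on
    # errorCode '205'(BadRequest) would be consulted.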
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
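    # Worked example of the cost rule above, using the endpoint configs from
    # describe(): GET ticker/24h is {'cost': 1, 'noMarket': 25}, so calling it
    # without a 'market' param costs 25 weighted units(25 * the 60ms rateLimit
    # = 1500ms between calls), while a call with 'market' costs 1.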
|
fetch_my_trades
|
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                '103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                '104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                '212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                '216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                '240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                '300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                '307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                '315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                '401': ExchangeError, # Deposits for this asset are not available at this time.
                '402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
                '404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                '408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                '410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
    def currency_to_precision(self, code, fee, networkCode=None):
        return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])  # rounding mode 0 is TRUNCATE
def amount_to_precision(self, symbol, amount):
        # the amount is truncated(not rounded) to the market's amount precision,
        # which is expressed as a number of decimal places
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
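    # Worked example(a sketch): with an amount precision of 6 decimals, as for
    # ADA in the currency sample further below, TRUNCATE with DECIMAL_PLACES
    # cuts instead of rounding:
    #
    #     self.decimal_to_precision('0.1234567', TRUNCATE, 6, DECIMAL_PLACES)  # -> '0.123456'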
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # the price is first rounded to the market's pricePrecision counted in
        # significant digits(self.precisionMode is SIGNIFICANT_DIGITS), then
        # truncated to a maximum of 8 decimal places before submission
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
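    # Worked example(a sketch): with pricePrecision 5 and the
    # SIGNIFICANT_DIGITS precisionMode set in describe() above, a price is
    # first rounded to 5 significant digits, then truncated to 8 decimals:
    #
    #     self.decimal_to_precision('8097.4321', ROUND, 5, SIGNIFICANT_DIGITS)  # -> '8097.4'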
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
        # currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
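    # Sketch of the cache behavior above: with the default
    # options['fetchCurrencies']['expires'] of 1000ms, two calls within one
    # second share a single publicGetAssets request:
    #
    #     first = await exchange.fetch_currencies_from_cache()   # hits the API
    #     second = await exchange.fetch_currencies_from_cache()  # served from the self.options cache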
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
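    # A hedged usage sketch(the exchange instance, symbol and prices are made
    # up): a plain limit order, and a stop-loss-limit order passing the
    # trigger through params as handled by the isStopLimit branch above
    #
    #     await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.01, 25000)
    #     await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.01, 24000, {'stopPrice': 24500})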
    async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
        """
        edit a trade order
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float|None amount: how much of the currency you want to trade, in units of the base currency
        :param float|None price: the price at which the order is to be fulfilled, in units of the quote currency
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
    async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        fetches information on multiple orders made by the user
        :param str symbol: unified market symbol of the market orders were made in
        :param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
        #         "disableMarketProtection":false,
        #         "timeInForce": "GTC",
        #         "postOnly": true,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
# MASKED: fetch_my_trades function (lines 1346-1389)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str|None tag: an optional tag for the withdrawal, sent to the exchange as the paymentId field when provided
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
        #         "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
        #         "success": true,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
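    # An illustrative trace of the signing scheme above(made-up values): for
    # GET /v2/order?market=BTC-EUR with an empty payload the preimage is
    #
    #     auth = '1590505649241' + 'GET' + '/v2/order?market=BTC-EUR' + ''
    #
    # and BITVAVO-ACCESS-SIGNATURE is the hex HMAC-SHA256 of that string
    # keyed with the API secret, as computed by self.hmac() above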
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
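    # Example of the weighted cost lookup above, using the endpoint configs
    # from describe(): privateGetOrdersOpen is declared as
    # {'cost': 1, 'noMarket': 25}, so fetching open orders without a
    # 'market' param costs 25 request-weight units, while a market-scoped
    # call costs only 1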
|
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
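    # A hedged pagination sketch(the exchange instance and trade id are made
    # up): besides 'start'/'limit', the endpoint accepts the commented-out
    # cursor params shown in the request above, e.g.
    #
    #     await exchange.fetch_my_trades('ETH/EUR', params={'tradeIdFrom': 'b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4'})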
| 1,346
| 1,389
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
                    '103': RateLimitExceeded,  # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
                    '104': RateLimitExceeded,  # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
                    '212': InvalidOrder,  # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
                    '216': InsufficientFunds,  # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
                    '240': OrderNotFound,  # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
                    '300': AuthenticationError,  # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
                    # '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
                    '307': PermissionDenied,  # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
                    '315': BadRequest,  # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
                    '401': ExchangeError,  # Deposits for this asset are not available at this time.
                    '402': PermissionDenied,  # You need to verify your identity before you can deposit and withdraw digital assets.
                    '403': PermissionDenied,  # You need to verify your phone number before you can deposit and withdraw digital assets.
                    '404': OnMaintenance,  # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
                    '408': InsufficientFunds,  # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
                    '410': ExchangeError,  # Withdrawals for this asset are not available at this time.
                    '411': BadRequest,  # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
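    # A minimal hedged usage sketch(credentials are placeholders), assuming
    # the standard ccxt async instantiation pattern:
    #
    #     import ccxt.async_support as ccxt
    #     exchange = ccxt.bitvavo({'apiKey': 'KEY', 'secret': 'SECRET'})
    #     markets = await exchange.load_markets()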
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
        # Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
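    # Illustrative values for the two-step rounding above(made up): with
    # precisionMode = SIGNIFICANT_DIGITS and a pricePrecision of 5, a raw
    # price of 8095.3456789 is first rounded to 5 significant digits
    # ('8095.3') and then truncated to at most 8 decimal places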
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
        # this method is now redundant
        # currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
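    # Cache behavior sketch: with the default options above
    # ({'expires': 1000}), two calls within one second reuse the stored
    # response, while a later call refreshes it via publicGetAssets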
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
        #         taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>`
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
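    # For reference, the parsed result keeps the exchange 'nonce' for
    # sequencing, looking roughly like:
    #
    #     {'symbol': 'BTC/EUR', 'bids': [[8097.4, 0.6229099], ...], 'asks': [...], 'nonce': 35883831}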
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
        :param float amount: how much of the currency you want to trade, in units of the base currency
        :param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
    async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
        """
        edit a trade order
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float|None amount: how much of the currency you want to trade, in units of the base currency
        :param float|None price: the price at which the order is to be fulfilled, in units of the quote currency
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol; when given, only orders in that market are cancelled
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
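#
# A usage sketch for pulling trade history from a given start time, assuming an
# authenticated client (the date is illustrative):
#
#     since = exchange.parse8601('2020-05-01T00:00:00Z')
#     trades = await exchange.fetch_my_trades('ETH/EUR', since, 1000)
#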
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional address tag, sent to the API as paymentId
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
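#
# A usage sketch, assuming the API key allows withdrawals (the address is illustrative):
#
#     tx = await exchange.withdraw('BTC', 1.5, '1ExampleBitcoinAddress')
#     # charge the fee on top instead of deducting it from the amount:
#     tx = await exchange.withdraw('BTC', 1.5, '1ExampleBitcoinAddress', None, {'addWithdrawalFee': True})
#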
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
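#
# The private-API signature above is HMAC-SHA256 over timestamp + method + url + body,
# hex-encoded. A standalone sketch of the same computation, with a hypothetical secret,
# for illustration only:
#
#     import hashlib
#     import hmac
#     import json
#     import time
#     secret = b'YOUR_SECRET'
#     timestamp = str(int(time.time() * 1000))
#     body = json.dumps({'market': 'BTC-EUR', 'side': 'buy', 'orderType': 'market', 'amountQuote': '10'})
#     auth = timestamp + 'POST' + '/v2/order' + body
#     signature = hmac.new(secret, auth.encode(), hashlib.sha256).hexdigest()
#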
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
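#
# For example, with the {'cost': 1, 'noMarket': 25} config attached to ordersOpen and
# ticker/24h in the API description:
#
#     config = {'cost': 1, 'noMarket': 25}
#     self.calculate_rate_limiter_cost('private', 'GET', 'ordersOpen', {}, config)  # -> 25
#     self.calculate_rate_limiter_cost('private', 'GET', 'ordersOpen', {'market': 'BTC-EUR'}, config)  # -> 1
#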
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100,000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
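#
# With the default SIGNIFICANT_DIGITS precision mode and a pricePrecision of 5, the
# price is first rounded to 5 significant digits, then truncated to at most 8
# decimals, e.g. 8097.53 -> '8097.5' and 0.000123456 -> '0.00012346' (illustrative).
#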
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
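#
# e.g. `await exchange.fetch_order_book('BTC/EUR', 25)` requests the top 25 price
# levels per side via the `depth` parameter; `orderbook['nonce']` carries the book
# version number for detecting updates.
#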
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
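#
# Window arithmetic sketch: when `since` is given, the request is capped to `limit`
# candles by sending an explicit end time, e.g. for '1h' candles (duration 3600 seconds)
# and limit 24:
#
#     end = since + 24 * 3600 * 1000  # one day after `since`, in milliseconds
#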
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of the currency you want to trade, in units of the base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol; when given, only orders in that market are cancelled
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
# MASKED: withdraw function (lines 1391-1422)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
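#
# A minimal standalone sketch of the signing scheme implemented above(not part
# of ccxt; the secret and values are hypothetical): the signature is the
# hex-encoded HMAC-SHA256 of timestamp + method + url + payload, keyed with
# the API secret
#
#     import hashlib
#     import hmac
#     timestamp = '1590500000000'
#     auth = timestamp + 'GET' + '/v2/account' + ''  # payload is empty for GET/DELETE
#     signature = hmac.new(b'secret', auth.encode(), hashlib.sha256).hexdigest()
#     # sent as BITVAVO-ACCESS-SIGNATURE alongside BITVAVO-ACCESS-KEY,
#     # BITVAVO-ACCESS-TIMESTAMP and BITVAVO-ACCESS-WINDOW
#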
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
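#
# Illustrative example: a response body of
# {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
# yields errorCode '216', which self.exceptions['exact'] maps to
# InsufficientFunds, so that exception is raised with the feedback string
#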
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
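#
# Illustrative example(hypothetical call): with a config of
# {'cost': 1, 'noMarket': 25}, as used for the ticker/24h and ordersOpen
# endpoints, a request without a 'market' param is weighted 25 rate limit
# units, while the same request with 'market' specified is weighted 1
#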
|
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional payment id / tag to attach to the withdrawal
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
| 1,391
| 1,422
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding self will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
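#
# Illustrative example(hypothetical values): for a market with
# pricePrecision 5, a price of 8097.53 is first rounded to 5 significant
# digits('8097.5') and the result is then truncated to at most 8 decimal
# places before being sent to the exchange
#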
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
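#
# Illustrative note: with the default 'expires' of 1000ms, a second call
# within one second returns the cached response instead of hitting
# publicGetAssets again; after that the cache is refreshed
#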
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
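#
# Illustrative example(hypothetical values): for timeframe '1h'
# (duration 3600 seconds), since=1590000000000 and the default limit of
# 1440 candles, the computed end is
# 1590000000000 + 1440 * 3600 * 1000 = 1595184000000, i.e. 60 days later
#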
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
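#
# Illustrative example: for a balance entry like
# {"symbol":"BTC","available":"1.57593193","inOrder":"0.74832374"},
# safe_balance derives total = free + used,
# i.e. 1.57593193 + 0.74832374 = 2.32425567 BTC
#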
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
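#
# Hypothetical usage sketch(not part of ccxt):
#
#     result = await exchange.fetch_deposit_address('XRP')
#     # result['address'] holds the deposit address and
#     # result['tag'] the paymentId when the currency requires one
#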
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
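#
# Hypothetical usage sketches(not part of ccxt; symbols and values are made
# up): a limit order passes amount and price, a market order may instead
# specify the quote-currency cost, and stop orders require a stopPrice param
#
#     await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.01, 8000)
#     await exchange.create_order('BTC/EUR', 'market', 'buy', None, None, {'cost': 50})
#     await exchange.create_order('BTC/EUR', 'stopLossLimit', 'sell', 0.01, 7900, {'stopPrice': 7950})
#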
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
"""
edit an active order by changing its amount, amountRemaining or price
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float|None amount: how much of the currency you want to trade in units of the base currency
:param float|None price: the price at which the order is to be fulfilled, in units of the quote currency
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market the orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
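#
# a minimal usage sketch, assuming `import ccxt.async_support as ccxt` and
# hypothetical credentials; the symbol argument is mandatory because the
# underlying GET /trades endpoint requires a market:
#
#     exchange = ccxt.bitvavo({'apiKey': '...', 'secret': '...'})
#     trades = await exchange.fetch_my_trades('ETH/EUR', since=1590505649000, limit=100)
#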
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional payment id for the withdrawal, sent to the exchange as 'paymentId'
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
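#
# a minimal usage sketch with hypothetical values; the tag argument is only
# needed for currencies that use a payment id, and 'addWithdrawalFee' is the
# optional flag documented in the request above:
#
#     tx = await exchange.withdraw('BTC', 1.5, '<address-or-IBAN>', None, {'addWithdrawalFee': True})
#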
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
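#
# note on the type inference above: withdraw() responses carry "success" and
# withdrawal-history rows carry "address", so either key marks a withdrawal,
# while deposit-history rows have neither and fall through to 'deposit', e.g.
#
#     parse_transaction({'success': True, 'symbol': 'BTC', 'amount': '1.5'})  # type == 'withdrawal'
#     parse_transaction({'timestamp': 1590492401000, 'symbol': 'ETH', 'amount': '0.249825'})  # type == 'deposit'
#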
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
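#
# the signing scheme above as a standalone sketch(standard library only,
# hypothetical secret); Bitvavo expects a 64-character lowercase hex
# HMAC-SHA256, compare errorCode 308:
#
#     import hashlib
#     import hmac
#     import json
#     import time
#
#     timestamp = str(int(time.time() * 1000))
#     body = json.dumps({'market': 'BTC-EUR'})  # empty string for GET/DELETE
#     auth = timestamp + 'POST' + '/v2/order' + body
#     signature = hmac.new(b'secret', auth.encode(), hashlib.sha256).hexdigest()
#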
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
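#
# worked example: a response like {"errorCode":216,"error":"You do not have
# sufficient balance to complete this operation."} is checked against the
# 'broad' patterns by error text first, then '216' is matched in the 'exact'
# map, raising InsufficientFunds with the raw body as feedback
#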
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
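#
# worked example: with the endpoint config {'cost': 1, 'noMarket': 25} used
# by ticker/24h and ordersOpen, a call without a 'market' param costs 25
# rate-limit units, while the same call scoped to a single market costs 1
#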
|
fetch_deposits
|
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
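# a worked example of the tier table above(fees are fractions keyed by
# trailing volume thresholds): a taker with a volume of 300000 falls in the
# [250000, 500000) bracket and pays 0.0016(0.16%); the same volume as a
# maker pays 0.0008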
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError, # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for this asset are not available at this time.
'402': PermissionDenied, # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You cannot transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# https://docs.bitfinex.com/docs/introduction#amount-precision
# The amount field allows up to 8 decimals.
# Anything exceeding this will be rounded to the 8th decimal.
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# https://docs.bitfinex.com/docs/introduction#price-precision
# The precision level of all trading prices is based on significant figures.
# All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
# Prices submit with a precision larger than 5 will be cut by the API.
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
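#
# a worked example of the two-step rounding above, assuming a market with
# pricePrecision 5 and the SIGNIFICANT_DIGITS precisionMode set in describe():
# price_to_precision(symbol, 8095.347) first rounds to 5 significant digits
# ('8095.3'); the final TRUNCATE to 8 decimals is then a no-op
#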
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
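#
# the cache above is purely time-based: with the default
# options['fetchCurrencies']['expires'] of 1000 ms, repeated calls within one
# second reuse the stored response instead of hitting GET /assets again
#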
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: true,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
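# worked example for the window above: with timeframe '1h'(duration 3600
# seconds), since set and limit left at the 1440 default,
# end = since + 1440 * 3600 * 1000 = since + 5184000000 ms, i.e. 60 days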
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
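#
# minimal usage sketches with hypothetical values: a market buy may be sized
# in quote currency via the 'cost'/'amountQuote' param, and stop orders take
# a 'stopPrice' param that is forwarded as triggerAmount:
#
#     await exchange.create_order('ETH/EUR', 'market', 'buy', None, None, {'cost': 50})
#     await exchange.create_order('ETH/EUR', 'stopLossLimit', 'sell', 0.25, 180.0, {'stopPrice': 185.0})
#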
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
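#
# a minimal usage sketch with a hypothetical order id; at least one of
# amount, price, or the bitvavo-specific 'amountRemaining' param must be
# supplied, otherwise the ArgumentsRequired branch above is taken:
#
#     await exchange.edit_order('2e7ce7fc-44e2-4d80-a4a7-d079c4750b61', 'ETH/EUR', 'limit', 'sell', None, 185.0)
#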
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: the order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional payment id for the withdrawal, sent to the exchange as 'paymentId'
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
# MASKED: fetch_deposits function (lines 1465-1502)
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
|
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
| 1,465
| 1,502
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded,  # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect this limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase this limit.
'104': RateLimitExceeded,  # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase this limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder,  # Amount is below the minimum allowed amount for this asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds,  # {"errorCode":216,"error":"You do not have sufficient balance to complete this operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound,  # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return this error."}
'300': AuthenticationError,  # Authentication is required for this endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError,  # Authentication is required for this endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied,  # This key does not allow access from this IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest,  # Websocket connections may not be used in a browser. Please use REST requests for this.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError,  # Deposits for this asset are not available at this time.
'402': PermissionDenied,  # You need to verify your identity before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance,  # Could not complete this operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds,  # {"errorCode":408,"error":"You do not have sufficient balance to complete this operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError,  # Withdrawals for this asset are not available at this time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
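# Illustrative sketch (not ccxt API): resolving the tiered fee table defined in
# describe() above for a given 30-day volume. Each tier row is a
# [volume_threshold, rate] pair sorted ascending, so the applicable rate is the
# last row whose threshold the volume has reached.
def tier_rate(tiers, volume):
    rate = tiers[0][1]  # tier 0 applies by default
    for threshold, tiered in tiers:
        if volume >= threshold:
            rate = tiered
    return rate

taker_tiers = [[0, 0.0025], [100000, 0.0020], [250000, 0.0016], [500000, 0.0012]]
assert tier_rate(taker_tiers, 150000) == 0.0020  # 100k reached, 250k not yet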
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
def amount_to_precision(self, symbol, amount):
# truncate (not round) the amount to the market's amount precision,
# counted in decimal places
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def price_to_precision(self, symbol, price):
price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
# price precision on Bitvavo is counted in significant figures: the price is
# first rounded to the market's pricePrecision significant digits above,
# then truncated to at most 8 decimal places
# (e.g. 1.2345, 123.45, 1234.5, 0.00012345)
return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
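# Minimal sketch (assumed pure-Python equivalents, not ccxt's
# decimal_to_precision) of the two-stage price handling above: round to the
# market's number of significant digits first, then truncate to at most
# 8 decimal places.
from decimal import Decimal, ROUND_DOWN, ROUND_HALF_UP

def round_significant(value, digits):
    d = Decimal(str(value))
    if d == 0:
        return d
    # quantize to the place value of the last significant digit
    quantum = Decimal(1).scaleb(d.adjusted() - digits + 1)
    return d.quantize(quantum, rounding=ROUND_HALF_UP)

def truncate_decimals(d, places):
    return d.quantize(Decimal(1).scaleb(-places), rounding=ROUND_DOWN)

price = round_significant(8095.34567, 5)  # Decimal('8095.3'), 5 significant digits
price = truncate_decimals(price, 8)       # Decimal('8095.30000000')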
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for bitvavo
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = await self.publicGetMarkets(params)
currencies = await self.fetch_currencies_from_cache(params)
currenciesById = self.index_by(currencies, 'symbol')
#
# [
# {
# "market":"ADA-BTC",
# "status":"trading", # "trading" "halted" "auction"
# "base":"ADA",
# "quote":"BTC",
# "pricePrecision":5,
# "minOrderInBaseAsset":"100",
# "minOrderInQuoteAsset":"0.001",
# "orderTypes": ["market", "limit"]
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'market')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
baseCurrency = self.safe_value(currenciesById, baseId)
amountPrecision = None
if baseCurrency is not None:
amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'trading'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': self.safe_integer(market, 'pricePrecision'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderInBaseAsset'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderInQuoteAsset'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
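# Generic sketch of the expiring-cache pattern used above (illustrative names,
# not ccxt API): a fetched response is reused until `expires_ms` milliseconds
# have passed, then refetched on the next access.
import time

class TtlCache:
    def __init__(self, expires_ms):
        self.expires_ms = expires_ms
        self.timestamp = None
        self.response = None

    def get(self, fetch):
        now = int(time.time() * 1000)
        if (self.timestamp is None) or ((now - self.timestamp) > self.expires_ms):
            self.response = fetch()  # miss or stale: refetch and restamp
            self.timestamp = now
        return self.response

cache = TtlCache(expires_ms=1000)
assets = cache.get(lambda: [{'symbol': 'ADA'}])  # later calls within 1s reuse this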
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an associative dictionary of currencies
"""
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "symbol":"ADA",
# "name":"Cardano",
# "decimals":6,
# "depositFee":"0",
# "depositConfirmations":15,
# "depositStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "withdrawalFee":"0.2",
# "withdrawalMinAmount":"0.2",
# "withdrawalStatus":"OK", # "OK", "MAINTENANCE", "DELISTED"
# "networks": ["Mainnet"], # "ETH", "NEO", "ONT", "SEPA", "VET"
# "message":"",
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
depositStatus = self.safe_value(currency, 'depositStatus')
deposit = (depositStatus == 'OK')
withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
withdrawal = (withdrawalStatus == 'OK')
active = deposit and withdrawal
name = self.safe_string(currency, 'name')
precision = self.safe_integer(currency, 'decimals', 8)
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdrawal,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMinAmount'),
'max': None,
},
},
}
return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical summary calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
marketId = self.safe_string(ticker, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(ticker, 'timestamp')
last = self.safe_string(ticker, 'last')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'volumeQuote')
open = self.safe_string(ticker, 'open')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical summaries calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
#
# createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# fetchMyTrades(private)
#
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
#
# watchMyTrades(private)
#
# {
# event: 'fill',
# timestamp: 1590964470132,
# market: 'ETH-EUR',
# orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
# fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
# side: 'sell',
# amount: '0.1',
# price: '211.46',
# taker: True,
# fee: '0.056',
# feeCurrency: 'EUR'
# }
#
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string(trade, 'side')
id = self.safe_string_2(trade, 'id', 'fillId')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '-')
taker = self.safe_value(trade, 'taker')
takerOrMaker = None
if taker is not None:
takerOrMaker = 'taker' if taker else 'maker'
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
orderId = self.safe_string(trade, 'orderId')
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid (buy) and ask (sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested symbol
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
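# Illustrative sketch (not ccxt's parse_order_book): turning the raw string
# [price, amount] pairs from the response above into floats and reading the
# top of the book.
raw = {
    'bids': [['8097.4', '0.6229099'], ['8097.2', '0.64151283']],
    'asks': [['8097.5', '1.36916911'], ['8098.8', '0.33462248']],
}
bids = [[float(price), float(amount)] for price, amount in raw['bids']]
asks = [[float(price), float(amount)] for price, amount in raw['asks']]
best_bid, best_ask = bids[0][0], asks[0][0]  # levels arrive best-first
spread = best_ask - best_bid  # ~0.1 in this example, up to float rounding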
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
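# Worked example of the window arithmetic above (see ccxt issue #9227):
# end = since + limit * duration * 1000, where duration is the timeframe
# length in seconds. With '1h' candles and limit 24 the window spans one day.
duration = 3600                # parse_timeframe('1h') -> seconds per candle
since = 1590383700000          # example start timestamp, in milliseconds
limit = 24
end = since + limit * duration * 1000
assert end - since == 86400000  # 24 * 60 * 60 * 1000 ms = 24 hours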
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
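# Minimal sketch of the mapping performed above: 'available' becomes 'free',
# 'inOrder' becomes 'used', and ccxt's safe_balance derives the total; plain
# floats stand in here for ccxt's string arithmetic.
raw = [{'symbol': 'BTC', 'available': '1.57593193', 'inOrder': '0.74832374'}]
balances = {}
for entry in raw:
    free = float(entry['available'])
    used = float(entry['inOrder'])
    balances[entry['symbol']] = {'free': free, 'used': used, 'total': free + used}
# balances['BTC']['total'] -> 2.32425567 (up to float rounding)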
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': side,
'orderType': type, # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
# 'amount': self.amount_to_precision(symbol, amount),
# 'price': self.price_to_precision(symbol, price),
# 'amountQuote': self.cost_to_precision(symbol, cost),
# 'timeInForce': 'GTC', # 'GTC', 'IOC', 'FOK'
# 'selfTradePrevention': 'decrementAndCancel', # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
# 'postOnly': False,
# 'disableMarketProtection': False, # don't cancel if the next fill price is 10% worse than the best fill price
# 'responseRequired': True, # False is faster
}
isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
if type == 'market':
cost = None
if price is not None:
cost = amount * price
else:
cost = self.safe_number_2(params, 'cost', 'amountQuote')
if cost is not None:
precision = market['precision']['price']
request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
params = self.omit(params, ['cost', 'amountQuote'])
elif type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif isStopMarket or isStopLimit:
stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
if stopPrice is None:
if isStopLimit:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
elif isStopMarket:
if price is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
else:
stopPrice = price
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
params = self.omit(params, ['stopPrice', 'triggerAmount'])
request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = 'price'
request['amount'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
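# Hypothetical usage of create_order() above; the key and secret are
# placeholders. A market buy can be sized by base amount, or by quote cost via
# params, which the method converts into the 'amountQuote' request field.
import asyncio
import ccxt.async_support as ccxt_async

async def example():
    exchange = ccxt_async.bitvavo({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    try:
        # limit order: explicit price and base amount
        await exchange.create_order('BTC/EUR', 'limit', 'buy', 0.001, 20000)
        # market order sized by 50 EUR of quote currency
        await exchange.create_order('BTC/EUR', 'market', 'buy', None, None, {'cost': 50})
    finally:
        await exchange.close()

# asyncio.run(example())  # requires real credentials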
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: the order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all currently open, unfilled orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# cancelOrder, cancelAllOrders
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
# createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "price": "183.49", # limit orders only
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# "timeInForce": "GTC",
# "postOnly": True,
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_integer(order, 'created')
marketId = self.safe_string(order, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
status = self.parse_order_status(self.safe_string(order, 'status'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
remaining = self.safe_string(order, 'amountRemaining')
filled = self.safe_string(order, 'filledAmount')
cost = self.safe_string(order, 'filledAmountQuote')
fee = None
feeCost = self.safe_number(order, 'feePaid')
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
rawTrades = self.safe_value(order, 'fills', [])
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = self.safe_value(order, 'postOnly')
# https://github.com/ccxt/ccxt/issues/8489
stopPrice = self.safe_number(order, 'triggerPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': None,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag/memo for the withdrawal, sent as the paymentId field
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
# fetchWithdrawals
#
# {
# "timestamp": 1542967486256,
# "symbol": "BTC",
# "amount": "0.99994",
# "address": "BitcoinAddress",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# "fee": "0.00006",
# "status": "awaiting_processing"
# }
#
# fetchDeposits
#
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
#
id = None
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'symbol')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = None
if ('success' in transaction) or ('address' in transaction):
type = 'withdrawal'
else:
type = 'deposit'
tag = self.safe_string(transaction, 'paymentId')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = '/' + self.version + '/' + self.implode_params(path, params)
getOrDelete = (method == 'GET') or (method == 'DELETE')
if getOrDelete:
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
payload = ''
if not getOrDelete:
if query:
body = self.json(query)
payload = body
timestamp = str(self.milliseconds())
auth = timestamp + method + url + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
headers = {
'BITVAVO-ACCESS-KEY': self.apiKey,
'BITVAVO-ACCESS-SIGNATURE': signature,
'BITVAVO-ACCESS-TIMESTAMP': timestamp,
'BITVAVO-ACCESS-WINDOW': accessWindow,
}
if not getOrDelete:
headers['Content-Type'] = 'application/json'
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
|
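For intuition, a minimal standalone sketch of the request-signing scheme sign() implements above: HMAC-SHA256 over timestamp + method + url + payload, hex-encoded. The helper name and its use of the standard library are illustrative assumptions, not part of the exchange class.

import hashlib
import hmac
import json
import time

def sign_bitvavo_request(secret, method, url, body=None):
    # Non-GET/DELETE requests carry a JSON body that also becomes the signed payload.
    payload = json.dumps(body) if body else ''
    timestamp = str(int(time.time() * 1000))
    message = timestamp + method + url + payload
    signature = hmac.new(secret.encode(), message.encode(), hashlib.sha256).hexdigest()
    return timestamp, signature

The returned pair maps onto the BITVAVO-ACCESS-TIMESTAMP and BITVAVO-ACCESS-SIGNATURE headers assembled in sign() above.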
_GetReportingClient
|
Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
# MASKED: _GetReportingClient function (lines 83-92)
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
stacktrace = traceback.format_exc()  # format_exc takes no exception argument; it formats the active one
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
|
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
| 83
| 92
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
stacktrace = traceback.format_exc()  # format_exc takes no exception argument; it formats the active one
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
|
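A hedged sketch of the pattern _GetReportingClient relies on (the API name and version strings below are assumptions standing in for util.API_NAME and util.API_VERSION): user credentials are skipped entirely and an API key is attached as a global query parameter instead.

from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.core import http

client_class = core_apis.GetClientClass('clouderrorreporting', 'v1beta1')  # assumed name/version
client = client_class(get_credentials=False, http=http.Http())
client.AddGlobalParam('key', 'PLACEHOLDER_API_KEY')  # placeholder, not a real key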
ReportError
|
Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
# MASKED: ReportError function (lines 95-128)
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
|
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
stacktrace = traceback.format_exc()  # format_exc takes no exception argument; it formats the active one
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
| 95
| 128
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
stacktrace = traceback.format_exc()  # format_exc takes no exception argument; it formats the active one
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
|
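ReportError scrubs the stacktrace through error_reporting_util.RemovePrivateInformationFromTraceback before sending it; that function's rules are not shown in this file. Purely for intuition, a hypothetical sketch of the kind of substitution such a scrubber might perform:

import re

def scrub_home_paths(stacktrace):
    # Replace the username component of common home-directory paths so the
    # reported stacktrace does not reveal who ran the command.
    return re.sub(r'(/home/|/Users/|C:\\Users\\)[^/\\\s"]+', r'\1USER', stacktrace)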
HandleGcloudCrash
|
Checks if an installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
stacktrace = traceback.format_exc()  # format_exc takes no exception argument; it formats the active one
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
# MASKED: HandleGcloudCrash function (lines 131-150)
|
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
| 131
| 150
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
stacktrace = traceback.format_exc()  # format_exc takes no exception argument; it formats the active one
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
|
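A hedged usage sketch showing where HandleGcloudCrash sits in a CLI entry point; run_command and main below are hypothetical stand-ins, not part of the Cloud SDK:

def main(argv):
    try:
        run_command(argv)  # hypothetical command dispatcher
        return 0
    except Exception as err:  # catch-all at the process boundary
        HandleGcloudCrash(err)
        return 1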
__init__
|
Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
|
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import h5py
import numpy as np
from skimage.transform import resize as skResize
from util.util import normalize, adaptive_instance_normalization
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
# MASKED: __init__ function (lines 22-40)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
A_path = self.A_paths[index % self.A_size] # make sure index is within the range
if self.opt.serial_batches: # use a fixed index for domain B, keeping A-B pairs in order
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
# HSI images for domain B are loaded with a dedicated h5py-based loader; swap in a regular image loader here for normal images.
try:
    B_img = self.hsi_loader(B_path)
except KeyError:
    print(B_path)
    raise  # re-raise: B_img would otherwise be undefined below
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different numbers of images,
we take the maximum of the two.
"""
return max(self.A_size, self.B_size)
def stack(self, img, resize=True):
_R = img[:,:,0]
_G = img[:,:,1]
_B = img[:,:,2]
R_img = np.stack((_R,)*10, axis=2)
G_img = np.stack((_G,)*10, axis=2)
B_img = np.stack((_B,)*11, axis=2)
hsi_img = np.concatenate((B_img, G_img, R_img), axis=2)
hsi_img = self.resize(hsi_img)
hsi_img = np.einsum('abc->cab', hsi_img)
return hsi_img
def resize(self, img):
img = skResize(img, (self.opt.crop_size, self.opt.crop_size))
return img
def hsi_loader(self, path):
with h5py.File(path, 'r') as f:
d = np.array(f['data'])
hs_data = np.einsum('abc -> cab',self.resize(d))
#print('Inside hsi loader, {0}'.format(np.shape(hs_data)))
return hs_data
|
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot_B, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
| 22
| 40
|
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import h5py
import numpy as np
from skimage.transform import resize as skResize
from util.util import normalize, adaptive_instance_normalization
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot_B, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
A_path = self.A_paths[index % self.A_size] # make sure index is within the range
if self.opt.serial_batches: # use a fixed index for domain B, keeping A-B pairs in order
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
# HSI images for domain B are loaded with a dedicated h5py-based loader; swap in a regular image loader here for normal images.
try:
    B_img = self.hsi_loader(B_path)
except KeyError:
    print(B_path)
    raise  # re-raise: B_img would otherwise be undefined below
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different numbers of images,
we take the maximum of the two.
"""
return max(self.A_size, self.B_size)
def stack(self, img, resize=True):
_R = img[:,:,0]
_G = img[:,:,1]
_B = img[:,:,2]
R_img = np.stack((_R,)*10, axis=2)
G_img = np.stack((_G,)*10, axis=2)
B_img = np.stack((_B,)*11, axis=2)
hsi_img = np.concatenate((B_img, G_img, R_img), axis=2)
hsi_img = self.resize(hsi_img)
hsi_img = np.einsum('abc->cab', hsi_img)
return hsi_img
def resize(self, img):
img = skResize(img, (self.opt.crop_size, self.opt.crop_size))
return img
def hsi_loader(self, path):
with h5py.File(path, 'r') as f:
d = np.array(f['data'])
hs_data = np.einsum('abc -> cab',self.resize(d))
#print('Inside hsi loader, {0}'.format(np.shape(hs_data)))
return hs_data
|
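For intuition, a standalone sketch of the channel-stacking trick stack() performs (illustrative only): each RGB channel is replicated to fill 31 pseudo-hyperspectral bands, concatenated in B, G, R order, then moved to channels-first layout.

import numpy as np

def rgb_to_pseudo_hsi(img):
    # img: (H, W, 3) RGB array -> (31, H, W) cube: 11 copies of B, 10 of G, 10 of R.
    r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    cube = np.concatenate(
        (np.stack((b,) * 11, axis=2),
         np.stack((g,) * 10, axis=2),
         np.stack((r,) * 10, axis=2)),
        axis=2)
    return np.einsum('abc->cab', cube)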
__getitem__
|
Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
|
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import h5py
import numpy as np
from skimage.transform import resize as skResize
from util.util import normalize, adaptive_instance_normalization
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot_B, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
# MASKED: __getitem__ function (lines 42-74)
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different numbers of images,
we take the maximum of the two.
"""
return max(self.A_size, self.B_size)
def stack(self, img, resize=True):
_R = img[:,:,0]
_G = img[:,:,1]
_B = img[:,:,2]
R_img = np.stack((_R,)*10, axis=2)
G_img = np.stack((_G,)*10, axis=2)
B_img = np.stack((_B,)*11, axis=2)
hsi_img = np.concatenate((B_img, G_img, R_img), axis=2)
hsi_img = self.resize(hsi_img)
hsi_img = np.einsum('abc->cab', hsi_img)
return hsi_img
def resize(self, img):
img = skResize(img, (self.opt.crop_size, self.opt.crop_size))
return img
def hsi_loader(self, path):
with h5py.File(path, 'r') as f:
d = np.array(f['data'])
hs_data = np.einsum('abc -> cab',self.resize(d))
#print('Inside hsi loader, {0}'.format(np.shape(hs_data)))
return hs_data
|
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
A_path = self.A_paths[index % self.A_size] # make sure index is within the range
if self.opt.serial_batches: # use a fixed index for domain B, keeping A-B pairs in order
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
# HSI images for domain B are loaded with a dedicated h5py-based loader; swap in a regular image loader here for normal images.
try:
    B_img = self.hsi_loader(B_path)
except KeyError:
    print(B_path)
    raise  # re-raise: B_img would otherwise be undefined below
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
| 42
| 74
|
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import h5py
import numpy as np
from skimage.transform import resize as skResize
from util.util import normalize, adaptive_instance_normalization
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot_B, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
A_path = self.A_paths[index % self.A_size] # make sure index is within the range
if self.opt.serial_batches: # use a fixed index for domain B, keeping A-B pairs in order
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
# HSI images for domain B are loaded with a dedicated h5py-based loader; swap in a regular image loader here for normal images.
try:
    B_img = self.hsi_loader(B_path)
except KeyError:
    print(B_path)
    raise  # re-raise: B_img would otherwise be undefined below
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different numbers of images,
we take the maximum of the two.
"""
return max(self.A_size, self.B_size)
def stack(self, img, resize=True):
_R = img[:,:,0]
_G = img[:,:,1]
_B = img[:,:,2]
R_img = np.stack((_R,)*10, axis=2)
G_img = np.stack((_G,)*10, axis=2)
B_img = np.stack((_B,)*11, axis=2)
hsi_img = np.concatenate((B_img, G_img, R_img), axis=2)
hsi_img = self.resize(hsi_img)
hsi_img = np.einsum('abc->cab', hsi_img)
return hsi_img
def resize(self, img):
img = skResize(img, (self.opt.crop_size, self.opt.crop_size))
return img
def hsi_loader(self, path):
with h5py.File(path, 'r') as f:
d = np.array(f['data'])
hs_data = np.einsum('abc -> cab',self.resize(d))
#print('Inside hsi loader, {0}'.format(np.shape(hs_data)))
return hs_data
|
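adaptive_instance_normalization is imported from util.util and not defined in this file. Assuming it performs standard AdaIN-style statistic matching between the A and B cubes, a minimal NumPy sketch would look like this:

import numpy as np

def adain(content, style, eps=1e-5):
    # Channels-first arrays (C, H, W): shift content's per-channel mean/std
    # to match the style image's statistics.
    c_mean = content.mean(axis=(1, 2), keepdims=True)
    c_std = content.std(axis=(1, 2), keepdims=True) + eps
    s_mean = style.mean(axis=(1, 2), keepdims=True)
    s_std = style.std(axis=(1, 2), keepdims=True)
    return (content - c_mean) / c_std * s_std + s_mean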
__init__
|
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
# MASKED: __init__ function (lines 15-27)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
|
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
| 15
| 27
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to look up.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
|
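The generated overloads above accept either flat keyword arguments or a TagArgs object. As a minimal usage sketch (not part of the generated file; the package alias, resource name, and ARN are illustrative placeholders, and this must run inside a Pulumi program), tagging an ECS cluster with the keyword-argument form looks like this:
```python
import pulumi
import pulumi_aws as aws  # the pulumi-aws SDK that ships this generated Tag class

# Tag an existing ECS cluster; the ARN is a placeholder.
example = aws.ecs.Tag("example",
    resource_arn="arn:aws:ecs:us-east-1:123456789012:cluster/example",
    key="Name",
    value="example")

pulumi.export("tagKey", example.key)
```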
__init__
|
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
# MASKED: __init__ function (lines 68-83)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to look up.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
|
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
| 68
| 83
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to look up.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
|
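For completeness, a hedged sketch of the second overload, which passes a fully populated TagArgs instead of flat keywords (all values are placeholders; again this assumes a Pulumi program context):
```python
import pulumi_aws as aws

# Same resource expressed through the args overload.
tag = aws.ecs.Tag(
    "example",
    aws.ecs.TagArgs(
        resource_arn="arn:aws:ecs:us-east-1:123456789012:cluster/example",  # placeholder
        key="Environment",
        value="production",
    ),
)
```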
get
|
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to look up.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
# MASKED: get function (lines 206-231)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to look up.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
| 206
| 231
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
resource_arn: pulumi.Input[str],
value: pulumi.Input[str]):
"""
The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if resource_arn is not None:
pulumi.set(__self__, "resource_arn", resource_arn)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TagArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TagArgs.__new__(TagArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
if resource_arn is None and not opts.urn:
raise TypeError("Missing required property 'resource_arn'")
__props__.__dict__["resource_arn"] = resource_arn
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(Tag, __self__).__init__(
'aws:ecs/tag:Tag',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
resource_arn: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'Tag':
"""
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to look up.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__["key"] = key
__props__.__dict__["resource_arn"] = resource_arn
__props__.__dict__["value"] = value
return Tag(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Tag name.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the ECS resource to tag.
"""
return pulumi.get(self, "resource_arn")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Tag value.
"""
return pulumi.get(self, "value")
|
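Tying the static get() to the import documentation above: the provider ID is the ECS resource ARN and the tag key joined by a comma. A hedged lookup sketch with placeholder values:
```python
import pulumi_aws as aws

# Adopt an existing tag into the program's state without creating it.
adopted = aws.ecs.Tag.get(
    "adopted",
    id="arn:aws:ecs:us-east-1:123456789012:cluster/example,Name",
)
```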
GetExperimentArgs
|
Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variations team's fieldtrial_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
test_blacklist = [
# These tests set their own field trials and should be ignored.
'quic.Quic.testCheckPageWithQuicProxy',
'quic.Quic.testCheckPageWithQuicProxyTransaction',
'smoke.Smoke.testCheckPageWithHoldback',
]
# MASKED: GetExperimentArgs function (lines 27-49)
def GenerateTestSuites():
"""A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
"""
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if test_method.id() not in test_blacklist:
ts = unittest.TestSuite()
ts.addTest(test_method)
yield (ts, test_method.id())
def ParseFlagsWithExtraBrowserArgs(extra_args):
"""Generates a function to override common.ParseFlags.
The returned function honors everything in the original ParseFlags() but
appends the given extra browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
"""
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = ((original_flags.browser_args if
original_flags.browser_args else '') + ' ' + extra_args)
return original_flags
return AddExtraBrowserArgs
def main():
"""Runs all non-blacklisted tests against Chromium field trials.
This script runs all Chrome proxy integration tests that haven't been
blacklisted, against the field trial testing configuration used by Chromium
perf bots.
"""
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
# Each test is wrapped in its own test suite so results can be evaluated
# individually.
for test_suite, test_id in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write('%s... ' % test_id)
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print("%s %s %s --test_filter=%s --browser_args='%s'" % (
sys.executable,
os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
if flags.failfast:
return
if __name__ == '__main__':
main()
|
def GetExperimentArgs():
"""Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variations team's fieldtrial_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
"""
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif platform.system().lower() == 'linux':
my_platform = 'linux'
elif platform.system().lower() == 'windows':
my_platform = 'windows'
elif platform.system().lower() == 'darwin':
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform)
| 27
| 49
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
test_blacklist = [
# These tests set their own field trials and should be ignored.
'quic.Quic.testCheckPageWithQuicProxy',
'quic.Quic.testCheckPageWithQuicProxyTransaction',
'smoke.Smoke.testCheckPageWithHoldback',
]
def GetExperimentArgs():
"""Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variations team's fieldtrial_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
"""
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif platform.system().lower() == 'linux':
my_platform = 'linux'
elif platform.system().lower() == 'windows':
my_platform = 'windows'
elif platform.system().lower() == 'darwin':
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform)
def GenerateTestSuites():
"""A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
"""
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if test_method.id() not in test_blacklist:
ts = unittest.TestSuite()
ts.addTest(test_method)
yield (ts, test_method.id())
def ParseFlagsWithExtraBrowserArgs(extra_args):
"""Generates a function to override common.ParseFlags.
The returned function honors everything in the original ParseFlags() but
appends the given extra browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
"""
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = ((original_flags.browser_args if
original_flags.browser_args else '') + ' ' + extra_args)
return original_flags
return AddExtraBrowserArgs
def main():
"""Runs all non-blacklisted tests against Chromium field trials.
This script runs all Chrome proxy integration tests that haven't been
blacklisted, against the field trial testing configuration used by Chromium
perf bots.
"""
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
# Each test is wrapped in its own test suite so results can be evaluated
# individually.
for test_suite, test_id in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write('%s... ' % test_id)
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print("%s %s %s --test_filter=%s --browser_args='%s'" % (
sys.executable,
os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
if flags.failfast:
return
if __name__ == '__main__':
main()
|
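A hedged sketch of consuming the list GetExperimentArgs() returns outside of main(): the browser binary path is a placeholder, and GetExperimentArgs is assumed to be importable from the script above.
```python
import subprocess

# Launch a browser binary (placeholder path) with the field-trial switches
# produced by the script's GetExperimentArgs().
cmd = ['/path/to/chrome'] + GetExperimentArgs()
subprocess.check_call(cmd)
```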
GenerateTestSuites
|
A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
test_blacklist = [
# These tests set their own field trials and should be ignored.
'quic.Quic.testCheckPageWithQuicProxy',
'quic.Quic.testCheckPageWithQuicProxyTransaction',
'smoke.Smoke.testCheckPageWithHoldback',
]
def GetExperimentArgs():
"""Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variations team's fieldtrial_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
"""
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif platform.system().lower() == 'linux':
my_platform = 'linux'
elif platform.system().lower() == 'windows':
my_platform = 'windows'
elif platform.system().lower() == 'darwin':
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform)
# MASKED: GenerateTestSuites function (lines 51-67)
def ParseFlagsWithExtraBrowserArgs(extra_args):
"""Generates a function to override common.ParseFlags.
The returned function honors everything in the original ParseFlags() but
appends the given extra browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
"""
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = ((original_flags.browser_args if
original_flags.browser_args else '') + ' ' + extra_args)
return original_flags
return AddExtraBrowserArgs
def main():
"""Runs all non-blacklisted tests against Chromium field trials.
This script runs all Chrome proxy integration tests that haven't been
blacklisted, against the field trial testing configuration used by Chromium
perf bots.
"""
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
# Each test is wrapped in its own test suite so results can be evaluated
# individually.
for test_suite, test_id in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write('%s... ' % test_id)
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print("%s %s %s --test_filter=%s --browser_args='%s'" % (
sys.executable,
os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
if flags.failfast:
return
if __name__ == '__main__':
main()
|
def GenerateTestSuites():
"""A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
"""
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if test_method.id() not in test_blacklist:
ts = unittest.TestSuite()
ts.addTest(test_method)
yield (ts, test_method.id())
| 51
| 67
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
test_blacklist = [
# These tests set their own field trials and should be ignored.
'quic.Quic.testCheckPageWithQuicProxy',
'quic.Quic.testCheckPageWithQuicProxyTransaction',
'smoke.Smoke.testCheckPageWithHoldback',
]
def GetExperimentArgs():
"""Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variations team's fieldtrial_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
"""
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif platform.system().lower() == 'linux':
my_platform = 'linux'
elif platform.system().lower() == 'windows':
my_platform = 'windows'
elif platform.system().lower() == 'darwin':
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform)
def GenerateTestSuites():
"""A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
"""
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if test_method.id() not in test_blacklist:
ts = unittest.TestSuite()
ts.addTest(test_method)
yield (ts, test_method.id())
def ParseFlagsWithExtraBrowserArgs(extra_args):
"""Generates a function to override common.ParseFlags.
The returned function honors everything in the original ParseFlags() but
appends the given extra browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
"""
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = ((original_flags.browser_args if
original_flags.browser_args else '') + ' ' + extra_args)
return original_flags
return AddExtraBrowserArgs
def main():
"""Runs all non-blacklisted tests against Chromium field trials.
This script runs all Chrome proxy integration tests that haven't been
blacklisted, against the field trial testing configuration used by Chromium
perf bots.
"""
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
# Each test is wrapped in its own test suite so results can be evaluated
# individually.
for test_suite, test_id in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write('%s... ' % test_id)
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print("%s %s %s --test_filter=%s --browser_args='%s'" % (
sys.executable,
os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
if flags.failfast:
return
if __name__ == '__main__':
main()
|
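The one-test-per-suite idea in GenerateTestSuites can be shown in isolation. A self-contained sketch (the test case here is invented for illustration): wrapping each discovered test method in its own TestSuite lets every test be run and reported independently.
```python
import unittest

class Sample(unittest.TestCase):
    def test_a(self):
        self.assertTrue(True)
    def test_b(self):
        self.assertEqual(1 + 1, 2)

loader = unittest.TestLoader()
for case in loader.loadTestsFromTestCase(Sample):
    suite = unittest.TestSuite([case])  # one test per suite
    result = unittest.TextTestRunner(verbosity=0).run(suite)
    print(case.id(), 'ok' if result.wasSuccessful() else 'failed')
```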
ParseFlagsWithExtraBrowserArgs
|
Generates a function to override common.ParseFlags.
The returned function honors everything in the original ParseFlags() but
appends the given extra browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
test_blacklist = [
# These tests set their own field trials and should be ignored.
'quic.Quic.testCheckPageWithQuicProxy',
'quic.Quic.testCheckPageWithQuicProxyTransaction',
'smoke.Smoke.testCheckPageWithHoldback',
]
def GetExperimentArgs():
"""Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variations team's fieldtrial_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
"""
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif platform.system().lower() == 'linux':
my_platform = 'linux'
elif platform.system().lower() == 'windows':
my_platform = 'windows'
elif platform.system().lower() == 'darwin':
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform)
def GenerateTestSuites():
"""A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
"""
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if test_method.id() not in test_blacklist:
ts = unittest.TestSuite()
ts.addTest(test_method)
yield (ts, test_method.id())
# MASKED: ParseFlagsWithExtraBrowserArgs function (lines 69-85)
def main():
"""Runs all non-blacklisted tests against Chromium field trials.
This script runs all Chrome proxy integration tests that haven't been
blacklisted, against the field trial testing configuration used by Chromium
perf bots.
"""
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
# Each test is wrapped in its own test suite so results can be evaluated
# individually.
for test_suite, test_id in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write('%s... ' % test_id)
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print("%s %s %s --test_filter=%s --browser_args='%s'" % (
sys.executable,
os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
if flags.failfast:
return
if __name__ == '__main__':
main()
|
def ParseFlagsWithExtraBrowserArgs(extra_args):
"""Generates a function to override common.ParseFlags.
The returned function honors everything in the original ParseFlags() but
appends the given extra browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
"""
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = ((original_flags.browser_args if
original_flags.browser_args else '') + ' ' + extra_args)
return original_flags
return AddExtraBrowserArgs
| 69
| 85
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
test_blacklist = [
# These tests set their own field trials and should be ignored.
'quic.Quic.testCheckPageWithQuicProxy',
'quic.Quic.testCheckPageWithQuicProxyTransaction',
'smoke.Smoke.testCheckPageWithHoldback',
]
def GetExperimentArgs():
"""Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variations team's fieldtrial_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to Chrome
"""
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif platform.system().lower() == 'linux':
my_platform = 'linux'
elif platform.system().lower() == 'windows':
my_platform = 'windows'
elif platform.system().lower() == 'darwin':
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform)
def GenerateTestSuites():
"""A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
"""
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if test_method.id() not in test_blacklist:
ts = unittest.TestSuite()
ts.addTest(test_method)
yield (ts, test_method.id())
def ParseFlagsWithExtraBrowserArgs(extra_args):
"""Generates a function to override common.ParseFlags.
The returned function honors everything in the original ParseFlags(), but
appends the extra browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
"""
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = ((original_flags.browser_args if
original_flags.browser_args else '') + ' ' + extra_args)
return original_flags
return AddExtraBrowserArgs
def main():
"""Runs all non-blacklisted tests against Chromium field trials.
This script runs all Chrome proxy integration tests that haven't been
blacklisted against the field trial testing configuration used by Chromium
perf bots.
"""
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
# Each test is wrapped in its own test suite so results can be evaluated
# individually.
for test_suite, test_id in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write('%s... ' % test_id)
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print("%s %s %s --test_filter=%s --browser_args='%s'" % (
sys.executable,
os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
if flags.failfast:
return
if __name__ == '__main__':
main()
|
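The implementation above works by capturing the parsed flags in a closure and then
replacing common.ParseFlags wholesale with a zero-argument callable. A minimal,
self-contained sketch of that pattern (fake_parse_flags and its Namespace fields
are illustrative stand-ins, not part of the script above):

import argparse

def fake_parse_flags():
    # Stand-in for common.ParseFlags(): returns parsed flags with browser_args.
    return argparse.Namespace(browser_args='--log-level=0')

def parse_flags_with_extra_browser_args(parse_flags, extra_args):
    # Parse once, then hand back a zero-argument callable that returns a copy
    # of the flags with extra_args appended. Returning a copy avoids
    # re-appending extra_args every time the callable is invoked.
    flags = parse_flags()
    def wrapper():
        combined = ((flags.browser_args or '') + ' ' + extra_args).strip()
        return argparse.Namespace(**dict(vars(flags), browser_args=combined))
    return wrapper

parse = parse_flags_with_extra_browser_args(
    fake_parse_flags, '--force-fieldtrials=Foo/Bar')
print(parse().browser_args)  # --log-level=0 --force-fieldtrials=Foo/Bar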
initialize_tick
|
Initialize a new tick at index i. Provide the index of an initialized tick lower
than i so that the insertion point can be found easily in the linked list.
Assumes that i is *not* already initialized.
:param i: index of the tick to initialize
:param i_l: index of an already-initialized tick with i_l < i
|
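The ticks form a dict-backed sorted doubly-linked list, so initialization is a
forward walk from i_l until the successor overshoots i, followed by a splice. A
standalone sketch of that walk (plain dicts and illustrative names, not the
contract code itself):

ticks = {0: {'prev': 0, 'next': 100}, 100: {'prev': 0, 'next': 100}}

def insert_tick(i, i_l):
    # Walk forward from i_l; once i_l's successor lies above i, splice i in.
    assert i not in ticks and i_l < i
    i_next = ticks[i_l]['next']
    if i_next > i:
        ticks[i_l]['next'] = i
        ticks[i] = {'prev': i_l, 'next': i_next}
        ticks[i_next]['prev'] = i
    else:
        insert_tick(i, i_next)

insert_tick(50, 0)   # any initialized tick below 50 works as a starting hint
assert ticks[0]['next'] == 50 and ticks[100]['prev'] == 50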
# SPDX-FileCopyrightText: 2021 Arthur Breitman
# SPDX-License-Identifier: LicenseRef-MIT-Arthur-Breitman
import math
from collections import defaultdict
from pycfmm.data import AutoRepr
infinity = 10 ** 100
class Tick(AutoRepr):
"""
An initialized tick, marking the beginning or end of a position
"""
def __init__(self, i_prev, i_next, feeGrowthOutside):
"""
:type i_prev: int
:type i_next: int
"""
self.i_prev = i_prev
self.i_next = i_next
self.Delta_L = 0
self.feeGrowthOutside = feeGrowthOutside
self.n_positions = 0
class Position(AutoRepr):
"""
An LP's position
"""
def __init__(self, L=0):
self.L = L
self.feeGrowthInsideLast = XY()
class XY(AutoRepr):
"""
A pair of balances in asset X and Y
"""
def __init__(self, x=0, y=0):
self.x, self.y = x, y
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return XY(x, y)
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return XY(x, y)
def __neg__(self):
return XY(-self.x, -self.y)
def __mul__(self, other):
return XY(other * self.x, other * self.y)
def __eq__(self, other):
return isinstance(other, XY) and self.x == other.x and self.y == other.y
class Contract(AutoRepr):
"""
A contract in the fashion of Uniswap v3
"""
@staticmethod
def tick(srp):
"""
Computes the closest tick index below a certain price, given its square root
:param srp: square root of a price
:return: the index of the closest tick below that price
"""
if srp == infinity:
return infinity
else:
return math.floor(math.log(srp) / math.log(math.sqrt(1.0001)))
@staticmethod
def srp(tick):
"""
Computes the square root of the price corresponding to a given tick
:param tick: the index of a tick
:return: the corresponding square root price
"""
if tick == infinity:
return infinity
return math.pow(math.sqrt(1.0001), tick)
def __init__(self, X, Y, fee=0.3 / 100):
self.balance = XY(X, Y)
self.srP = math.sqrt(Y / X)
self.i_a = self.tick(self.srP)
self.L = math.floor(math.sqrt(X * Y))
self.fee = fee
self.i_l = -infinity
self.ticks = {-infinity: Tick(-infinity, infinity, XY()), infinity: Tick(-infinity, infinity, XY())}
self.positions = defaultdict(Position)
self.feeGrowth = XY()
# MASKED: initialize_tick function (lines 106-123)
def collect_fees(self, user, i_l, i_u):
key = (user, i_l, i_u)
position = self.positions[key]
f_a = self.feeGrowth - self.ticks[i_u].feeGrowthOutside if self.i_a >= i_u else self.ticks[i_u].feeGrowthOutside
f_b = self.ticks[i_l].feeGrowthOutside if self.i_a >= i_l else self.feeGrowth - self.ticks[i_l].feeGrowthOutside
feeGrowthInside = self.feeGrowth - f_a - f_b
fees = (feeGrowthInside - position.feeGrowthInsideLast) * position.L
position.feeGrowthInsideLast = feeGrowthInside
return fees
def set_position(self, user, i_l, i_l_l, i_u, i_u_l, Delta_L):
assert (i_l_l <= i_l)
if i_l not in self.ticks:
self.initialize_tick(i_l, i_l_l)
assert (i_u_l <= i_u)
if i_u not in self.ticks:
self.initialize_tick(i_u, i_u_l)
position_key = (user, i_l, i_u)
fees = self.collect_fees(user, i_l, i_u)
self.positions[position_key].L += Delta_L
assert (self.positions[position_key].L >= 0)
# todo, garbage collect if we are unwinding the position completely?
Delta = XY()
# Add or remove liquidity above the current tick
if self.i_a < i_l:
Delta.x = Delta_L * (1 / self.srp(i_l) - 1 / self.srp(i_u))
Delta.y = 0
# Add or remove liquidity around the current tick
elif i_l <= self.i_a < i_u:
# update interval we are in if need be
if i_l > self.i_l:
self.i_l = i_l
Delta.x = Delta_L * (1 / self.srP - 1 / self.srp(i_u))
Delta.y = Delta_L * (self.srP - self.srp(i_l))
self.L += Delta_L
else: # i_a >= i_u
Delta.x = 0
Delta.y = Delta_L * (self.srp(i_u) - self.srp(i_l))
Delta -= fees
# make a note of how much liquidity is gained or lost when
# entering this interval
self.ticks[i_l].Delta_L += Delta_L
self.ticks[i_u].Delta_L -= Delta_L
self.balance += Delta
return -Delta
def X_to_Y(self, dX, fee=None):
# dX must be positive
assert (dX >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(dX * fee, 0)
srp_new = 1.0 / (1.0 / self.srP + (dX - fees.x) / self.L)
i_l = self.i_l
tick_new = self.tick(srp_new)
if tick_new >= i_l: # we did not push past the interval
dY = - (dX - fees.x) * self.srP * srp_new
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
# compute what we got up to i_l and how much it cost,
# i.e. what delta_X would have taken us there
self.i_l = self.ticks[self.i_l].i_prev
srP_l = self.srp(i_l)
dY = self.L * (srP_l - self.srP)
dX_ = - dY / (self.srP * srP_l)
tmp = dX_ / (1.0 - fee)
dX_, fees = tmp, XY(tmp - dX_, 0)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
# remove the liquidity we used to have
self.L -= self.ticks[i_l].Delta_L
# flip feeGrowth
self.ticks[i_l].feeGrowthOutside = self.feeGrowth - self.ticks[i_l].feeGrowthOutside
self.srP = self.srp(i_l) - 1e-16 # todo can we do better than this crutch?
user = XY(-dX_, -dY)
self.balance -= user
return user + self.X_to_Y(dX - dX_, fee)
def Y_to_X(self, dY, fee=None):
# dY must be positive
assert (dY >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(0, dY * fee)
srp_new = self.srP + (dY - fees.y) / self.L
i_u = self.ticks[self.i_l].i_next
tick_new = self.tick(srp_new)
if tick_new < i_u: # we did not push past the interval
dX = - (dY - fees.y) / (self.srP * srp_new)
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
self.i_l = i_u
srP_u = self.srp(i_u)
dY_ = self.L * (srP_u - self.srP)
dX = - dY_ / (self.srP * srP_u)
tmp = dY_ / (1.0 - fee)
dY_, fees = tmp, XY(0, tmp - dY_)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
self.L += self.ticks[i_u].Delta_L
self.ticks[i_u].feeGrowthOutside = self.feeGrowth - self.ticks[i_u].feeGrowthOutside
self.srP = srP_u
user = XY(-dX, -dY_)
self.balance -= user
return user + self.Y_to_X(dY - dY_, fee)
|
def initialize_tick(self, i, i_l):
"""
Initialize a new tick at index i. Provide the index of an initialized tick lower
than i so that the insertion point can be found easily in the linked list.
Assumes that i is *not* already initialized.
:param i: index of the tick to initialize
:param i_l: index of an already-initialized tick with i_l < i
"""
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if i_next > i:
self.ticks[i_l].i_next = i
# todo: find an instance where i_a == i and we set XY(0, 0), and check whether that's wrong
self.ticks[i] = Tick(i_l, i_next, self.feeGrowth if self.i_a >= i else XY())
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next)
| 106
| 123
|
# SPDX-FileCopyrightText: 2021 Arthur Breitman
# SPDX-License-Identifier: LicenseRef-MIT-Arthur-Breitman
import math
from collections import defaultdict
from pycfmm.data import AutoRepr
infinity = 10 ** 100
class Tick(AutoRepr):
"""
An initialized tick, marking the beginning or end of a position
"""
def __init__(self, i_prev, i_next, feeGrowthOutside):
"""
:type i_prev: int
:type i_next: int
"""
self.i_prev = i_prev
self.i_next = i_next
self.Delta_L = 0
self.feeGrowthOutside = feeGrowthOutside
self.n_positions = 0
class Position(AutoRepr):
"""
An LP's position
"""
def __init__(self, L=0):
self.L = L
self.feeGrowthInsideLast = XY()
class XY(AutoRepr):
"""
A pair of balances in asset X and Y
"""
def __init__(self, x=0, y=0):
self.x, self.y = x, y
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return XY(x, y)
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return XY(x, y)
def __neg__(self):
return XY(-self.x, -self.y)
def __mul__(self, other):
return XY(other * self.x, other * self.y)
def __eq__(self, other):
return isinstance(other, XY) and self.x == other.x and self.y == other.y
class Contract(AutoRepr):
"""
A contract in the fashion of Uniswap v3
"""
@staticmethod
def tick(srp):
"""
Computes the closest tick index below a certain price, given its square root
:param srp: square root of a price
:return: the index of the closest tick below that price
"""
if srp == infinity:
return infinity
else:
return math.floor(math.log(srp) / math.log(math.sqrt(1.0001)))
@staticmethod
def srp(tick):
"""
Computes the square root of the price corresponding to a given tick
:param tick: the index of a tick
:return: the corresponding square root price
"""
if tick == infinity:
return infinity
return math.pow(math.sqrt(1.0001), tick)
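# Note: tick and srp are inverses on integer indices, up to floating-point
# rounding: srp(t) = sqrt(1.0001)**t, so tick(srp(t)) recovers t.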
def __init__(self, X, Y, fee=0.3 / 100):
self.balance = XY(X, Y)
self.srP = math.sqrt(Y / X)
self.i_a = self.tick(self.srP)
self.L = math.floor(math.sqrt(X * Y))
self.fee = fee
self.i_l = -infinity
self.ticks = {-infinity: Tick(-infinity, infinity, XY()), infinity: Tick(-infinity, infinity, XY())}
self.positions = defaultdict(Position)
self.feeGrowth = XY()
def initialize_tick(self, i, i_l):
"""
Initialize a new tick at index i. Provide the index of an initialized tick lower
than i so that the insertion point can be found easily in the linked list.
Assumes that i is *not* already initialized.
:param i: index of the tick to initialize
:param i_l: index of an already-initialized tick with i_l < i
"""
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if i_next > i:
self.ticks[i_l].i_next = i
# todo: find an instance where i_a == i and we set XY(0, 0), and check whether that's wrong
self.ticks[i] = Tick(i_l, i_next, self.feeGrowth if self.i_a >= i else XY())
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next)
def collect_fees(self, user, i_l, i_u):
key = (user, i_l, i_u)
position = self.positions[key]
f_a = self.feeGrowth - self.ticks[i_u].feeGrowthOutside if self.i_a >= i_u else self.ticks[i_u].feeGrowthOutside
f_b = self.ticks[i_l].feeGrowthOutside if self.i_a >= i_l else self.feeGrowth - self.ticks[i_l].feeGrowthOutside
feeGrowthInside = self.feeGrowth - f_a - f_b
fees = (feeGrowthInside - position.feeGrowthInsideLast) * position.L
position.feeGrowthInsideLast = feeGrowthInside
return fees
def set_position(self, user, i_l, i_l_l, i_u, i_u_l, Delta_L):
assert (i_l_l <= i_l)
if i_l not in self.ticks:
self.initialize_tick(i_l, i_l_l)
assert (i_u_l <= i_u)
if i_u not in self.ticks:
self.initialize_tick(i_u, i_u_l)
position_key = (user, i_l, i_u)
fees = self.collect_fees(user, i_l, i_u)
self.positions[position_key].L += Delta_L
assert (self.positions[position_key].L >= 0)
# todo, garbage collect if we are unwinding the position completely?
Delta = XY()
# Add or remove liquidity above the current tick
if self.i_a < i_l:
Delta.x = Delta_L * (1 / self.srp(i_l) - 1 / self.srp(i_u))
Delta.y = 0
# Add or remove liquidity around the current tick
elif i_l <= self.i_a < i_u:
# update interval we are in if need be
if i_l > self.i_l:
self.i_l = i_l
Delta.x = Delta_L * (1 / self.srP - 1 / self.srp(i_u))
Delta.y = Delta_L * (self.srP - self.srp(i_l))
self.L += Delta_L
else: # i_a >= i_u
Delta.x = 0
Delta.y = Delta_L * (self.srp(i_u) - self.srp(i_l))
Delta -= fees
# make a note of how much liquidity is gained or lost when
# entering this interval
self.ticks[i_l].Delta_L += Delta_L
self.ticks[i_u].Delta_L -= Delta_L
self.balance += Delta
return -Delta
def X_to_Y(self, dX, fee=None):
# dX must be positive
assert (dX >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(dX * fee, 0)
srp_new = 1.0 / (1.0 / self.srP + (dX - fees.x) / self.L)
i_l = self.i_l
tick_new = self.tick(srp_new)
if tick_new >= i_l: # we did not push past the interval
dY = - (dX - fees.x) * self.srP * srp_new
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
# compute what we got up to i_l and how much it cost,
# i.e. what delta_X would have taken us there
self.i_l = self.ticks[self.i_l].i_prev
srP_l = self.srp(i_l)
dY = self.L * (srP_l - self.srP)
dX_ = - dY / (self.srP * srP_l)
tmp = dX_ / (1.0 - fee)
dX_, fees = tmp, XY(tmp - dX_, 0)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
# remove the liquidity we used to have
self.L -= self.ticks[i_l].Delta_L
# flip feeGrowth
self.ticks[i_l].feeGrowthOutside = self.feeGrowth - self.ticks[i_l].feeGrowthOutside
self.srP = self.srp(i_l) - 1e-16 # todo can we do better than this crutch?
user = XY(-dX_, -dY)
self.balance -= user
return user + self.X_to_Y(dX - dX_, fee)
def Y_to_X(self, dY, fee=None):
# dY must be positive
assert (dY >= 0)
if fee is None:
fee = self.fee
# If there is no liquidity, stop the trade at this point
if self.L == 0:
self.i_a = self.tick(
self.srP) # we may need to update i_a if we went through several ticks to reach this point
return XY()
# Assume the trade will fit in a tick, what would the fees be like?
fees = XY(0, dY * fee)
srp_new = self.srP + (dY - fees.y) / self.L
i_u = self.ticks[self.i_l].i_next
tick_new = self.tick(srp_new)
if tick_new < i_u: # we did not push past the interval
dX = - (dY - fees.y) / (self.srP * srp_new)
self.srP = srp_new
self.i_a = tick_new
user = XY(-dX, -dY)
self.balance -= user
# Update fee growth with the fees we just collected
self.feeGrowth += fees * (1.0 / self.L)
return user
else:
self.i_l = i_u
srP_u = self.srp(i_u)
dY_ = self.L * (srP_u - self.srP)
dX = - dY_ / (self.srP * srP_u)
tmp = dY_ / (1.0 - fee)
dY_, fees = tmp, XY(0, tmp - dY_)
# update fee growth
self.feeGrowth += fees * (1.0 / self.L)
self.L += self.ticks[i_u].Delta_L
self.ticks[i_u].feeGrowthOutside = self.feeGrowth - self.ticks[i_u].feeGrowthOutside
self.srP = srP_u
user = XY(-dX, -dY_)
self.balance -= user
return user + self.Y_to_X(dY - dY_, fee)
|
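A minimal usage sketch of the contract above (assuming the classes are in scope;
the numbers are illustrative, not from the original source):

c = Contract(X=1000.0, Y=1000.0)   # price Y/X = 1.0, so i_a == 0 and L == 1000
ia = c.i_a
# Provide liquidity around the current price; the -infinity sentinel tick is a
# valid lower neighbour for both freshly initialized ticks.
owed = c.set_position('alice', ia - 1000, -infinity, ia + 1000, -infinity, 50)
print(owed)            # the LP's net flows; negative entries are deposits
print(c.X_to_Y(10.0))  # sell 10 X into the pool: x entry negative, y positive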
random_rotation
|
Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
|
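A sketch of an implementation consistent with the helpers defined in the file
below; it follows the same compose-offset-apply pattern as random_shift and
random_shear, though it is not necessarily the exact masked body:

def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
                    fill_mode='nearest', cval=0.):
    # Draw a rotation angle uniformly from [-rg, rg] degrees.
    theta = np.deg2rad(np.random.uniform(-rg, rg))
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                [np.sin(theta), np.cos(theta), 0],
                                [0, 0, 1]])
    h, w = x.shape[row_axis], x.shape[col_axis]
    # Rotate about the image centre rather than the top-left corner.
    transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
    x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
    return x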
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
# MASKED: random_rotation function (lines 46-73)
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
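"""Offsets a homogeneous transform so it is applied about the image center.
# Arguments
matrix: 3x3 homogeneous transformation matrix.
x: Number of rows in the image.
y: Number of columns in the image.
# Returns
The transform conjugated with translations to and from the image center.
"""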
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 3D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
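"""Flips a Numpy tensor along the given axis.
# Arguments
x: Input tensor.
axis: Axis along which to flip.
# Returns
The flipped tensor.
"""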
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
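"""Recursively lists picture files under `directory`.
# Arguments
directory: Directory to walk.
ext: Regex alternation of accepted file extensions.
# Returns
A list of matching file paths.
"""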
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
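# ZCA whitening: with covariance sigma = U diag(s) U^T, the whitening
# matrix is U diag(1. / sqrt(s + zca_epsilon)) U^T; `standardize` applies
# it to the flattened images.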
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
| 46
| 73
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
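# Illustrative usage sketch (editor's addition, not part of the original
# file): rotating a single channels-last image. The dummy array stands in
# for real pixel data; pass axis indices matching your data format.
def _example_random_rotation():
    img = np.random.random((64, 64, 3))  # height x width x channels
    # Up to +/-30 degrees; channels_last means rows=0, cols=1, channels=2.
    return random_rotation(img, rg=30.,
                           row_axis=0, col_axis=1, channel_axis=2)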
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
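# Illustrative usage sketch (editor's addition): shifting an image by up
# to 10% of its width and height. Dummy data; axes assume channels_last.
def _example_random_shift():
    img = np.random.random((64, 64, 3))
    return random_shift(img, wrg=0.1, hrg=0.1,
                        row_axis=0, col_axis=1, channel_axis=2)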
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
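# Illustrative usage sketch (editor's addition): zooming with independent
# width and height factors drawn uniformly from `zoom_range`. Dummy data.
def _example_random_zoom():
    img = np.random.random((64, 64, 3))
    # Factors zx, zy are drawn from [0.8, 1.2) for the two spatial axes.
    return random_zoom(img, zoom_range=(0.8, 1.2),
                       row_axis=0, col_axis=1, channel_axis=2)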
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
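# Illustrative usage sketch (editor's addition): shifting each channel by
# a random offset while clipping to the original per-image value range.
def _example_random_channel_shift():
    img = np.random.random((64, 64, 3))
    # channels_last, so the channel axis is 2; intensity is in pixel units.
    return random_channel_shift(img, intensity=0.2, channel_axis=2)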
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
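# Editor's note: the function above conjugates `matrix` with translations,
# i.e. it returns T(o) . M . T(-o) with o = (x/2 + 0.5, y/2 + 0.5), so the
# transform pivots about the image center instead of the top-left corner.
# A minimal sketch of the effect (assumed values, not from the original):
def _example_offset_center():
    rot90 = np.array([[0., -1., 0.],
                      [1., 0., 0.],
                      [0., 0., 1.]])
    # Without the offset, this rotation would pivot around pixel (0, 0).
    return transform_matrix_offset_center(rot90, 10, 10)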
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
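# Illustrative usage sketch (editor's addition): applying a hand-built
# homogeneous matrix directly; here a translation offset of 5 rows and
# 3 columns on a channels_first dummy image. Note scipy's affine transform
# maps output to input coordinates, so content shifts opposite the offset.
def _example_apply_transform():
    img = np.random.random((3, 32, 32))  # channels x height x width
    translation = np.array([[1., 0., 5.],
                            [0., 1., 3.],
                            [0., 0., 1.]])
    return apply_transform(img, translation, channel_axis=0)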
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
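# Editor's note: `flip_axis` is equivalent to reversing the array along
# the given axis; a minimal sanity check (assumed, not from the original):
def _example_flip_axis():
    img = np.arange(12).reshape((3, 4))
    assert np.array_equal(flip_axis(img, 1), img[:, ::-1])
    return flip_axis(img, 1)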
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
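# Illustrative usage sketch (editor's addition): a Numpy <-> PIL round
# trip. Requires PIL; values are rescaled to [0, 255] on the way out and
# come back as floats, so exact equality is not expected.
def _example_img_array_roundtrip():
    x = np.random.random((32, 32, 3))          # channels_last float image
    img = array_to_img(x, data_format='channels_last', scale=True)
    x2 = img_to_array(img, data_format='channels_last')
    return x2.shape  # (32, 32, 3)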
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
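# Illustrative usage sketch (editor's addition): collecting image paths
# under a hypothetical folder; the directory name is a placeholder.
def _example_list_pictures():
    return list_pictures('data/train', ext='jpg|png')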
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
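# Illustrative usage sketch (editor's addition): the featurewise pipeline.
# `fit` estimates dataset statistics which `standardize` then applies.
# Dummy data; real code would pass actual training images.
def _example_featurewise_pipeline():
    datagen = ImageDataGenerator(featurewise_center=True,
                                 featurewise_std_normalization=True)
    x_train = np.random.random((16, 32, 32, 3)).astype(K.floatx())
    datagen.fit(x_train)                 # computes mean/std over the data
    sample = datagen.standardize(np.copy(x_train[0]))
    return sample.shape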
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while True:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
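# Illustrative sketch (editor's addition): the minimal contract for a
# custom iterator is to subclass `Iterator` and implement
# `_get_batches_of_transformed_samples`. `_ExampleIterator` is assumed,
# not part of the original file; `data` must support fancy indexing
# (e.g. a Numpy array).
class _ExampleIterator(Iterator):
    def __init__(self, data, batch_size=8, shuffle=True, seed=None):
        self.data = data
        super(_ExampleIterator, self).__init__(len(data), batch_size,
                                               shuffle, seed)

    def _get_batches_of_transformed_samples(self, index_array):
        # Any per-sample transformation would go here.
        return self.data[index_array]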
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
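# Illustrative usage sketch (editor's addition): NumpyArrayIterator is
# usually constructed indirectly via `ImageDataGenerator.flow`. Dummy data.
def _example_numpy_array_iterator():
    x = np.random.random((10, 32, 32, 3))
    y = np.arange(10)
    it = ImageDataGenerator(rescale=1. / 255).flow(x, y, batch_size=4)
    batch_x, batch_y = next(it)          # shapes: (4, 32, 32, 3) and (4,)
    return batch_x.shape, batch_y.shape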
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
random_shift
|
Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
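# Example: `load_img` below looks up the requested method in this dict and
# passes it as the `resample` argument to PIL's `Image.resize`.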
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
# MASKED: random_shift function (lines 76-105)
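# A hedged reconstruction of the masked function, inferred from the
# `random_shift` docstring field above and the sibling transforms in this
# file; the actual masked implementation may differ in detail.
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
                 fill_mode='nearest', cval=0.):
    h, w = x.shape[row_axis], x.shape[col_axis]
    # Sample pixel offsets as random fractions of the height and width.
    tx = np.random.uniform(-hrg, hrg) * h
    ty = np.random.uniform(-wrg, wrg) * w
    translation_matrix = np.array([[1, 0, tx],
                                   [0, 1, ty],
                                   [0, 0, 1]])
    # A pure translation needs no offset to the image center.
    transform_matrix = translation_matrix
    x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
    return x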
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
        brightness_range: Tuple of two floats; range from which a brightness
            enhancement factor is picked.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
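    # Conjugate `matrix` with translations to/from the image center so the
    # transform is applied about the center rather than the origin; the +0.5
    # matches the pixel-center convention used by scipy.ndimage.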
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
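    # Split the 3x3 homogeneous matrix into the 2x2 linear part and the
    # translation vector expected by ndi.interpolation.affine_transform.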
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
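    # Reverse `x` along `axis`: swap the target axis to the front, flip it
    # with a negative-step slice, then swap it back into place.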
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
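    # Walk `directory` recursively and return the paths whose filenames match
    # `<word characters>.<ext>` for one of the extensions in `ext`.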
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
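            # ZCA whitening matrix: U . diag(1 / sqrt(s + eps)) . U^T, built
            # from the SVD of the covariance of the flattened samples.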
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
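        # Assemble a 1-tuple first so labels and sample weights can be
        # appended below; `x` becomes a list only when misc inputs exist.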
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                # Warn once per file rather than once per whitelisted extension.
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
| 76
| 105
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
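# Illustrative usage (editor's sketch, not part of the original module):
# random_rotation acts on a single 3D image tensor; the axis arguments must
# match the array layout, since the defaults assume channels_first.
#
#     img = np.random.uniform(0., 255., size=(64, 64, 3))   # channels_last
#     rotated = random_rotation(img, rg=40.,
#                               row_axis=0, col_axis=1, channel_axis=2)
#     assert rotated.shape == img.shape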
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
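# Illustrative usage (editor's sketch, not part of the original module):
# wrg/hrg are fractions of the image width/height, so for a 100x200 image
# the call below shifts by at most 10 rows and 40 columns.
#
#     img = np.zeros((3, 100, 200))        # channels_first: (c, h, w)
#     shifted = random_shift(img, wrg=0.2, hrg=0.1)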
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
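# Illustrative usage (editor's sketch, not part of the original module):
# zoom_range is a (lower, upper) pair of multiplicative factors, sampled
# independently per axis; (0.8, 1.2) zooms in or out by up to 20 percent.
#
#     img = np.zeros((3, 64, 64))
#     zoomed = random_zoom(img, zoom_range=(0.8, 1.2))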
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
    # Returns
        Numpy image tensor.
    # Raises
        ValueError: if `brightness_range` isn't a tuple or list of two floats.
"""
    if len(brightness_range) != 2:
        raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
            'Received: %s' % (brightness_range,))
    x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
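# Illustrative usage (editor's sketch, not part of the original module):
# random_brightness round-trips through PIL, so it requires PIL and, with a
# channels_last Keras image_data_format, expects a (h, w, c) array. Factors
# below 1 darken the image; factors above 1 brighten it.
#
#     img = np.random.uniform(0., 255., size=(32, 32, 3))
#     jittered = random_brightness(img, brightness_range=(0.5, 1.5))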
def transform_matrix_offset_center(matrix, x, y):
    """Recenters a 3x3 affine transform so it is applied about the image center.
    # Arguments
        matrix: 3x3 homogeneous transformation matrix.
        x: Height (number of rows) of the image.
        y: Width (number of columns) of the image.
    # Returns
        The transform matrix, conjugated with a translation to the center.
    """
    o_x = float(x) / 2 + 0.5
    o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
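# Worked note (editor's addition): the affine matrices built above rotate,
# shear and zoom about the origin, i.e. the top-left corner. Conjugating by
# a translation to the image center, offset . matrix . reset, makes the
# same transform act about the center instead. For example, centering a
# 90-degree rotation for a 4x4 image:
#
#     rot = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
#     centered = transform_matrix_offset_center(rot, 4, 4)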
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
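# Illustrative usage (editor's sketch, not part of the original module):
# individual transforms compose by matrix multiplication before a single
# call to apply_transform, which is how ImageDataGenerator.random_transform
# combines them further below.
#
#     img = np.zeros((3, 64, 64))
#     shift = np.array([[1., 0., 5.], [0., 1., 0.], [0., 0., 1.]])
#     zoom = np.array([[1.2, 0., 0.], [0., 1.2, 0.], [0., 0., 1.]])
#     out = apply_transform(img, np.dot(shift, zoom), channel_axis=0)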
def flip_axis(x, axis):
    """Reverses a Numpy array along the given axis."""
    x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
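# Illustrative usage (editor's sketch, not part of the original module):
# array_to_img and img_to_array are inverses up to dtype and rescaling,
# which makes them handy for eyeballing augmented batches. Requires PIL.
#
#     arr = np.random.uniform(0., 255., size=(32, 32, 3))
#     pil = array_to_img(arr, data_format='channels_last', scale=True)
#     back = img_to_array(pil, data_format='channels_last')
#     assert back.shape == arr.shape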
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
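# Illustrative usage (editor's sketch; 'photo.jpg' is a hypothetical file):
# a typical load-and-resize call. Note that target_size is (height, width),
# while PIL itself reports image size as (width, height).
#
#     img = load_img('photo.jpg', target_size=(224, 224),
#                    interpolation='bilinear')
#     x = img_to_array(img)    # (224, 224, 3) under channels_last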
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
    """Lists paths of all pictures under `directory` whose extension
    matches the regex alternation in `ext`."""
    return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
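# Illustrative usage (editor's sketch; 'data/train' is a hypothetical tree):
#
#     paths = list_pictures('data/train', ext='jpg|png')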
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
        shear_range: Float. Shear intensity
            (shear angle in counter-clockwise direction, in degrees).
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: Function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
            shuffle: Whether to shuffle the data (default: True).
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
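# Illustrative usage (editor's sketch, not part of the original module):
# the fit/standardize contract in miniature, assuming a channels_last
# image_data_format. fit estimates dataset statistics; standardize then
# applies them per sample, exactly as the iterators below do internally.
#
#     datagen = ImageDataGenerator(featurewise_center=True,
#                                  featurewise_std_normalization=True)
#     x_train = np.random.uniform(0., 255., size=(10, 32, 32, 3))
#     datagen.fit(x_train)
#     sample = datagen.standardize(np.copy(x_train[0]))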
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
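    # Worked note (editor's addition): integer ceil division, e.g. with
    # n=10 and batch_size=3 this gives (10 + 2) // 3 = 4 batches, the
    # last one holding a single sample.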
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
        if isinstance(x, (tuple, list)):
            if not isinstance(x[1], list):
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
        if batch_x_miscs:
            output = ([batch_x] + batch_x_miscs,)
        else:
            output = (batch_x,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                # Warn once per file rather than once per candidate extension.
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
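# Worked note (editor's addition): with split=(0.2, 0.6) and 10 valid files,
# start, stop = 2, 6, so this reports 4 files; the 'training' and
# 'validation' splits built by DirectoryIterator partition the same sorted
# ordering of files.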
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
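# Editor's illustrative sketch (not part of the original module): one way a
# DirectoryIterator is typically obtained and consumed. The directory layout
# ('data/train' with one subfolder per class) is a hypothetical assumption.
def _example_directory_iterator():
    gen = ImageDataGenerator(rescale=1. / 255)
    it = gen.flow_from_directory('data/train', target_size=(64, 64),
                                 batch_size=8, class_mode='categorical')
    return next(it)  # yields (batch_x, batch_y)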
|
random_shear
|
Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
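# Editor's illustrative sketch (not part of the original module): rotating a
# synthetic channels-last image by up to 30 degrees. The shape and the axis
# arguments below are assumptions for demonstration only.
def _example_random_rotation():
    img = np.random.rand(64, 64, 3)  # hypothetical (rows, cols, channels) image
    return random_rotation(img, rg=30., row_axis=0, col_axis=1, channel_axis=2)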
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
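# Editor's illustrative sketch (not part of the original module): shifting a
# channels-first image by at most 10% of its height and width; the default
# axis arguments already assume channels-first data.
def _example_random_shift():
    img = np.random.rand(3, 64, 64)  # hypothetical (channels, rows, cols) image
    return random_shift(img, wrg=0.1, hrg=0.1)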
# MASKED: random_shear function (lines 108-135)
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
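# Editor's illustrative sketch (not part of the original module): zooming a
# channels-first image by factors drawn uniformly from [0.8, 1.2].
def _example_random_zoom():
    img = np.random.rand(3, 64, 64)  # hypothetical (channels, rows, cols) image
    return random_zoom(img, zoom_range=(0.8, 1.2))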
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
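# Editor's illustrative sketch (not part of the original module): shifting all
# channels by random offsets in [-0.2, 0.2], clipped to the original range.
def _example_random_channel_shift():
    img = np.random.rand(3, 64, 64)  # hypothetical (channels, rows, cols) image
    return random_channel_shift(img, intensity=0.2)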
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
        raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
            'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
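# Editor's illustrative sketch (not part of the original module): a brightness
# shift on a channels-last image. Requires PIL, and assumes the backend's
# image_data_format is 'channels_last' (the default used by array_to_img).
def _example_random_brightness():
    img = np.random.rand(64, 64, 3) * 255  # hypothetical RGB image
    return random_brightness(img, brightness_range=(0.5, 1.5))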
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
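# Editor's illustrative sketch (not part of the original module): building a
# 15-degree rotation matrix that pivots about the center of a 64x64 image
# instead of the origin, which is how the random_* helpers above use it.
def _example_center_rotation_matrix():
    theta = np.deg2rad(15.)
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])
    return transform_matrix_offset_center(rotation, 64, 64)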
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
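# Editor's illustrative sketch (not part of the original module): reversing an
# array along axis 1, the primitive behind horizontal and vertical flips.
def _example_flip_axis():
    img = np.arange(12).reshape(3, 4)
    return flip_axis(img, axis=1)  # columns reversed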
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
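# Editor's illustrative sketch (not part of the original module): a round trip
# through PIL and back. Requires PIL; the array shape is an assumption.
def _example_array_img_roundtrip():
    arr = np.random.rand(32, 32, 3) * 255  # hypothetical channels-last image
    img = array_to_img(arr, data_format='channels_last', scale=True)
    return img_to_array(img, data_format='channels_last')  # shape (32, 32, 3)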
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
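# Editor's illustrative sketch (not part of the original module): loading and
# resizing an image file; 'photo.jpg' is a hypothetical path.
def _example_load_img():
    img = load_img('photo.jpg', target_size=(150, 150), interpolation='bilinear')
    return img_to_array(img)  # (150, 150, 3) for an RGB file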
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
                warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening` '
                              'which overrides setting of '
                              '`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
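# Editor's illustrative sketch (not part of the original module): augmenting a
# small in-memory dataset with `flow`. All shapes and arguments are assumed.
def _example_datagen_flow():
    x = np.random.rand(8, 32, 32, 3)  # hypothetical channels-last batch
    y = np.arange(8)
    datagen = ImageDataGenerator(rotation_range=20., horizontal_flip=True,
                                 data_format='channels_last')
    return next(datagen.flow(x, y, batch_size=4, shuffle=False))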
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
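# Editor's illustrative sketch (not part of the original module): indexing a
# NumpyArrayIterator directly as a Sequence; with y=None only images come back.
def _example_numpy_array_iterator():
    x = np.random.rand(10, 16, 16, 1)  # hypothetical grayscale batch
    it = NumpyArrayIterator(x, None,
                            ImageDataGenerator(data_format='channels_last'),
                            batch_size=5, shuffle=False,
                            data_format='channels_last')
    return it[0]  # first batch of 5 images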
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
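# Editor's illustrative sketch (not part of the original module): counting the
# usable images in one class folder; 'data/train/cats' is a hypothetical path.
def _example_count_valid_files():
    return _count_valid_files_in_directory('data/train/cats',
                                           {'png', 'jpg', 'jpeg'},
                                           split=None, follow_links=False)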
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
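# Editor's illustrative sketch (not part of the original function): shearing a
# channels-first image by up to 20 degrees. Assumes the surrounding module is
# in scope (numpy as np, transform_matrix_offset_center, apply_transform).
def _example_random_shear():
    img = np.random.rand(3, 64, 64)  # hypothetical (channels, rows, cols) image
    return random_shear(img, intensity=20.)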
| 108
| 135
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
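# Usage sketch (illustrative): shift by at most 20% of each spatial dimension,
# using the channels-first layout implied by the default axis arguments.
#
#     img = np.random.rand(3, 64, 64)    # (channels, rows, cols)
#     shifted = random_shift(img, wrg=0.2, hrg=0.2)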
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
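# Usage sketch (illustrative): shear by an angle drawn uniformly from
# [-10, 10] degrees, channels-first as per the default axes.
#
#     img = np.random.rand(3, 64, 64)
#     sheared = random_shear(img, intensity=10.)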
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
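# Usage sketch (illustrative): zoom each spatial axis by an independent factor
# drawn from [0.8, 1.2]; factors below 1 enlarge the content and factors above
# 1 shrink it, because the matrix maps output coordinates to input coordinates.
#
#     img = np.random.rand(3, 64, 64)
#     zoomed = random_zoom(img, zoom_range=(0.8, 1.2))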
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
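# Usage sketch (illustrative): shift every channel by a value drawn from
# [-0.2, 0.2], clipped to the image's original min/max so the value range
# is preserved.
#
#     img = np.random.rand(3, 64, 64)
#     shifted = random_channel_shift(img, intensity=0.2, channel_axis=0)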
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
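# Usage sketch (illustrative): a brightness factor is drawn from [0.5, 1.5];
# under PIL semantics 1.0 leaves the image unchanged, lower values darken it
# and higher values brighten it.
#
#     img = np.random.rand(32, 32, 3) * 255
#     out = random_brightness(img, brightness_range=(0.5, 1.5))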
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
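# The conjugation above, T(o) . M . T(-o) with o = (dim / 2 + 0.5), re-centers
# the transform so it pivots around the image center instead of the origin.
# Worked sketch: for a 4x4 image, o_x = o_y = 2.5, so a rotation matrix M ends
# up rotating about (2.5, 2.5) rather than the top-left corner.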
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
        x: 3D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
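# Usage sketch (illustrative): the identity homography leaves the image
# unchanged, since order=1 interpolation samples the original grid exactly.
#
#     img = np.random.rand(3, 8, 8)
#     out = apply_transform(img, np.eye(3), channel_axis=0)
#     # np.allclose(out, img) holds.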
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
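# Equivalence sketch: on NumPy >= 1.12 the swapaxes/reverse/swapaxes dance
# above gives the same result as np.flip(x, axis).
#
#     a = np.arange(6).reshape(2, 3)
#     # flip_axis(a, 1) == np.flip(a, 1), element-wise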
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
        data_format: Image data format,
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
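# Round-trip sketch (illustrative): array_to_img followed by img_to_array
# recovers an array of the same shape, with values rescaled to [0, 255] and
# quantized through uint8 when scale=True.
#
#     arr = np.random.rand(32, 32, 3)
#     img = array_to_img(arr, data_format='channels_last')
#     back = img_to_array(img, data_format='channels_last')
#     assert back.shape == arr.shape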
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
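# Usage sketch (the path below is a placeholder): load, resize and convert in
# one step; 'bilinear' is available in every supported PIL version.
#
#     img = load_img('/path/to/photo.jpg', target_size=(150, 150),
#                    interpolation='bilinear')
#     x = img_to_array(img)    # shape (150, 150, 3) under channels_last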
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
        shear_range: Float. Shear intensity
            (shear angle in counter-clockwise direction, in degrees).
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
    def flow(self, x, y=None, batch_size=32, shuffle=True,
             sample_weight=None, seed=None, save_to_dir=None,
             save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
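    # Usage sketch (illustrative): the normalization chain is plain NumPy.
    # Note that the in-place operators above mutate the array passed in, so
    # copy first if the original is still needed.
    #
    #     gen = ImageDataGenerator(rescale=1. / 255,
    #                              samplewise_center=True,
    #                              samplewise_std_normalization=True)
    #     img = np.random.rand(32, 32, 3) * 255
    #     normed = gen.standardize(img.copy())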
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
            transform_matrix = (shift_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shift_matrix))
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
            transform_matrix = (shear_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shear_matrix))
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
            transform_matrix = (zoom_matrix if transform_matrix is None
                                else np.dot(transform_matrix, zoom_matrix))
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
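    # Usage sketch (illustrative): a fixed seed makes the composed transform
    # reproducible for a given image.
    #
    #     gen = ImageDataGenerator(rotation_range=30, zoom_range=0.2,
    #                              horizontal_flip=True)
    #     img = np.random.rand(32, 32, 3)
    #     a = gen.random_transform(img, seed=0)
    #     b = gen.random_transform(img, seed=0)
    #     # np.allclose(a, b) holds: same seed, same homography and flips.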
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
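    # Usage sketch (illustrative): fit() computes the statistics that
    # standardize() later applies.
    #
    #     gen = ImageDataGenerator(featurewise_center=True,
    #                              featurewise_std_normalization=True)
    #     x_train = np.random.rand(100, 32, 32, 3)
    #     gen.fit(x_train)
    #     # gen.mean and gen.std now broadcast as (1, 1, 3) arrays.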
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
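        # e.g. n=10, batch_size=3 -> (10 + 3 - 1) // 3 = 4 batches,
        # the last batch holding a single sample.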
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
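# Usage sketch (illustrative): NumpyArrayIterator is normally obtained via
# ImageDataGenerator.flow rather than constructed directly.
#
#     gen = ImageDataGenerator(horizontal_flip=True)
#     x = np.random.rand(10, 32, 32, 3)
#     y = np.arange(10)
#     it = gen.flow(x, y, batch_size=4, shuffle=False)
#     xb, yb = it[0]    # first batch, via the Sequence interface
#     assert xb.shape == (4, 32, 32, 3)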
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
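# Worked sketch: with 100 matching files and split=(0.2, 0.6), start=20 and
# stop=60, so 40 files are counted for this subset.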
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
random_zoom
|
Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
# MASKED: random_zoom function (lines 138-175)
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
        x: 3D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
        data_format: Image data format,
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
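# Illustrative round trip through the two converters: with `scale=True`,
# `array_to_img` rescales values into [0, 255] before building the PIL image
# (`arr` is hypothetical):
# >>> arr = np.random.uniform(0, 255, (32, 32, 3))
# >>> img = array_to_img(arr, data_format='channels_last')
# >>> arr_back = img_to_array(img, data_format='channels_last')  # (32, 32, 3)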
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
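# Illustrative use of the PIL helpers above (the filenames are hypothetical
# and assume the image exists on disk):
# >>> img = load_img('cat.jpg', target_size=(150, 150))
# >>> x = img_to_array(img)          # shape (150, 150, 3)
# >>> save_img('cat_resized.png', x)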
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
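    """Lists the paths of the pictures under `directory`.
    # Arguments
        directory: Directory to walk, recursively.
        ext: Regex alternation of allowed (lowercase) file extensions.
    # Returns
        A list of matching file paths.
    """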
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: Function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
    def flow(self, x, y=None, batch_size=32, shuffle=True,
             sample_weight=None, seed=None, save_to_dir=None,
             save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
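    # Illustrative use of `flow` (a sketch; `x_train` and `y_train` are
    # assumed to be existing Numpy arrays):
    # >>> datagen = ImageDataGenerator(rotation_range=10)
    # >>> batches = datagen.flow(x_train, y_train, batch_size=32)
    # >>> x_batch, y_batch = next(batches)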
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
                - "binary" will be 1D binary labels,
                - "sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
            transform_matrix = (shift_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shift_matrix))
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
            transform_matrix = (shear_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shear_matrix))
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
            transform_matrix = (zoom_matrix if transform_matrix is None
                                else np.dot(transform_matrix, zoom_matrix))
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
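    # Passing the same `seed` reproduces the same random transform; this is
    # one way to augment paired inputs (e.g. images and segmentation masks)
    # identically. Illustrative, assuming `x_img` and `y_mask` are 3D arrays:
    # >>> x_aug = datagen.random_transform(x_img, seed=1)
    # >>> y_aug = datagen.random_transform(y_mask, seed=1)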
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
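# Worked example: with 10 valid files and split=(0.2, 0.6), start and stop
# become int(0.2 * 10) = 2 and int(0.6 * 10) = 6, so 4 files are counted.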
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
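# Illustrative use of `random_zoom` on a channels_first image; the two zoom
# factors are drawn independently from `zoom_range` (`img` is hypothetical):
# >>> img = np.random.uniform(0, 255, (3, 64, 64))
# >>> zoomed = random_zoom(img, (0.8, 1.2), row_axis=1, col_axis=2,
# ...                      channel_axis=0)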
| 138
| 175
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
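# Illustrative use of `random_rotation` with a channels_first image
# (`img` is hypothetical):
# >>> img = np.random.uniform(0, 255, (3, 64, 64))
# >>> rotated = random_rotation(img, rg=40., row_axis=1, col_axis=2,
# ...                           channel_axis=0)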
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
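# Illustrative use of `random_shift`, moving the image by at most 20% of its
# width and height (`img` is hypothetical):
# >>> img = np.random.uniform(0, 255, (3, 64, 64))
# >>> shifted = random_shift(img, wrg=0.2, hrg=0.2, row_axis=1, col_axis=2,
# ...                        channel_axis=0)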
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
    # Returns
        Numpy image tensor.
    # Raises
        ValueError: if `brightness_range` isn't a tuple or list of two floats.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
            of corresponding labels. If `sample_weight` is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
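    # A hedged usage sketch of the tuple form of `x` described above; the
    # names `images`, `angles` and `labels` are illustrative, not from the
    # source. The second tuple element is passed through unmodified, so the
    # yielded batches look like ([image_batch, angle_batch], label_batch):
    #
    #     gen = ImageDataGenerator(rotation_range=10.)
    #     batches = gen.flow((images, angles), labels, batch_size=32)
    #     x_batch, y_batch = next(batches)  # x_batch == [image_batch, angle_batch]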
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
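            # ZCA whitening: with sigma = X^T X / n = U S U^T, the whitening
            # matrix is W = U diag(1 / sqrt(S + eps)) U^T; multiplying the
            # flattened images by W decorrelates pixels while keeping the
            # result close to the original image (unlike plain PCA whitening).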
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
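            # Re-seeding with seed + total_batches_seen gives every batch a
            # distinct but reproducible random stream, so runs with the same
            # seed replay identical shuffling and augmentation sequences.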
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
    for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
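        # E.g. 100 files with split=(0.6, 1.0) gives start=60, stop=100,
        # i.e. the last 40 files in the sorted listing are counted.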
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
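        # Directory walking is I/O bound, so the per-class file counts are
        # computed concurrently on a thread pool.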
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
random_channel_shift
|
Performs a random channel shift.
# Arguments
    x: Input tensor. Must be 3D.
    intensity: Transformation intensity.
    channel_axis: Index of axis for channels in the input tensor.
# Returns
    Numpy image tensor.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
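# Illustrative usage (not from the source): rotate a single channels-last
# image by up to 20 degrees.
#
#     img = np.random.rand(64, 64, 3)
#     rotated = random_rotation(img, 20, row_axis=0, col_axis=1, channel_axis=2)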
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
# MASKED: random_channel_shift function (lines 178-199)
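# A minimal sketch of the masked function, reconstructed from the docstring
# in this record; the exact upstream body may differ in detail.
def random_channel_shift(x, intensity, channel_axis=0):
    """Performs a random channel shift.
    # Arguments
        x: Input tensor. Must be 3D.
        intensity: Transformation intensity.
        channel_axis: Index of axis for channels in the input tensor.
    # Returns
        Numpy image tensor.
    """
    x = np.rollaxis(x, channel_axis, 0)
    min_x, max_x = np.min(x), np.max(x)
    # Shift each channel by its own uniform random offset, clipping to the
    # original value range so the result remains a valid image tensor.
    channel_images = [np.clip(x_channel + np.random.uniform(-intensity,
                                                            intensity),
                              min_x,
                              max_x)
                      for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_axis + 1)
    return x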
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
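    """Shifts a homogeneous transform so it is applied about the image center.
    Composes offset . matrix . reset, where the offset/reset matrices
    translate the origin to the image center and back, so rotations, shears
    and zooms pivot around the center rather than the top-left corner.
    """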
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
        x: 3D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
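    # Split the 3x3 homogeneous matrix into its 2x2 linear part and a
    # translation vector: the form scipy's affine_transform expects, applied
    # independently to each channel.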
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
        data_format: Image data format,
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
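    """Lists all pictures under `directory`, recursively.
    `ext` is a regex fragment of allowed extensions; note that `re.match`
    only anchors the pattern at the start of the filename.
    """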
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
                '`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, '
                              'which overrides setting of '
                              '`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
            of corresponding labels. If `sample_weight` is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
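        # Example
        A minimal sketch of the tuple-input form described above (here
        `x_train`, `angles` and `y_train` are assumed to be equal-length
        Numpy arrays, with `x_train` of rank 4):
        ```python
        datagen = ImageDataGenerator(rotation_range=20)
        for (images, extra), labels in datagen.flow(
                (x_train, angles), y_train, batch_size=32):
            # `images` is augmented; `extra` is passed through unchanged.
            break
        ```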
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
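        # Example
        A hypothetical directory layout this method expects
        (one subdirectory per class):
        ```
        data/train/
            cats/
                cat001.jpg
                cat002.jpg
            dogs/
                dog001.jpg
                dog002.jpg
        ```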
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
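        # Example
        A minimal sketch (assuming `img` is a rank-3 Numpy array; note
        that `standardize` modifies its input in place, hence the copy):
        ```python
        datagen = ImageDataGenerator(rescale=1. / 255,
                                     samplewise_center=True)
        img_std = datagen.standardize(np.copy(img))
        ```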
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
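        # Example
        A minimal sketch (assuming `img` is a rank-3 Numpy array):
        ```python
        datagen = ImageDataGenerator(rotation_range=30,
                                     horizontal_flip=True)
        augmented = datagen.random_transform(img, seed=42)
        # `augmented` has the same shape as `img`.
        ```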
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
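        # Example
        A minimal sketch (assuming `x_train` is a rank-4 Numpy array):
        ```python
        datagen = ImageDataGenerator(featurewise_center=True,
                                     featurewise_std_normalization=True)
        datagen.fit(x_train)  # populates datagen.mean and datagen.std
        batches = datagen.flow(x_train, batch_size=32)
        ```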
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
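            # ZCA whitening matrix: U.diag(1 / sqrt(s + eps)).U^T, built
            # from the SVD of the covariance `sigma` of the flattened data.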
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
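    # Example
    A hypothetical minimal subclass sketch (`IndexIterator` is not part
    of this module; its batch method just returns the raw indices):
    ```python
    class IndexIterator(Iterator):
        def _get_batches_of_transformed_samples(self, index_array):
            return index_array
    it = IndexIterator(n=10, batch_size=4, shuffle=False, seed=None)
    first_batch = it[0]  # array([0, 1, 2, 3])
    ```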
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
        If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
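    # Example
    A minimal sketch (assuming `data/train/cats` exists and contains
    JPEG files):
    ```python
    pairs = list(_iter_valid_files('data/train/cats', {'jpg'}, False))
    # -> [('data/train/cats', 'cat001.jpg'), ...]
    ```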
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            # Warn once per file, rather than once per candidate extension.
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
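    # Example
    With 50 valid files and `split=(0.2, 0.6)`, `start, stop = 10, 30`,
    so 20 files are counted.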
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
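    # Example
    A minimal sketch (assuming `img` is a rank-3 array with channels on
    axis 0, matching the default axis arguments):
    ```python
    rotated = random_rotation(img, rg=40)
    ```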
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
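    # Example
    A minimal sketch (assuming `img` has channels on axis 0): shift by
    up to 10% of the width and 20% of the height:
    ```python
    shifted = random_shift(img, wrg=0.1, hrg=0.2)
    ```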
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
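    # Example
    A minimal sketch (assuming `img` has channels on axis 0):
    ```python
    sheared = random_shear(img, intensity=20)
    ```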
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
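    # Example
    A minimal sketch (assuming `img` has channels on axis 0): zoom
    between 80% and 120%:
    ```python
    zoomed = random_zoom(img, zoom_range=(0.8, 1.2))
    ```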
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
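    # Example
    A minimal sketch (assuming `img` has channels on axis 0):
    ```python
    shifted = random_channel_shift(img, intensity=0.2)
    ```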
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
    # Returns
        Numpy image tensor.
    # Raises
        ValueError: if `brightness_range` isn't a tuple.
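    # Example
    A minimal sketch (assuming `img` is a rank-3 Numpy array):
    ```python
    brightened = random_brightness(img, (0.5, 1.5))
    ```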
"""
if len(brightness_range) != 2:
        raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
            'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
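    """Offsets a homogeneous transform so it pivots around the image center.
    `x` and `y` are the image height and width; the matrix is wrapped
    between a translation to the center and a translation back.
    """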
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
        x: 3D Numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
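    # Reverses `x` along `axis` (used for horizontal/vertical flips).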
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
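    # Example
    A minimal sketch (assuming `'cat.jpg'` exists on disk):
    ```python
    img = load_img('cat.jpg', target_size=(224, 224))
    x = img_to_array(img)  # rank-3 Numpy array
    ```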
"""
if pil_image is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
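    """Lists all pictures under `directory` with extension matching `ext`."""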
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be implied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, '
                              'which overrides setting of '
                              '`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
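    # Usage sketch (editor's illustration, not part of the original source):
    # a scalar `zoom_range` z is expanded to `[1 - z, 1 + z]` above, so the
    # following two constructions are equivalent:
    #
    #     gen_a = ImageDataGenerator(zoom_range=0.2)
    #     gen_b = ImageDataGenerator(zoom_range=[0.8, 1.2])
    #     gen_a.zoom_range  # -> [0.8, 1.2], same as gen_b.zoom_range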
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
                of corresponding labels. If `sample_weight` is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
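    # Usage sketch (editor's illustration; assumes `x_train` is a rank-4 and
    # `y_train` a matching rank-1 numpy array): with `validation_split` set,
    # the same arrays can feed two iterators via the `subset` argument:
    #
    #     datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)
    #     train_it = datagen.flow(x_train, y_train, subset='training')
    #     val_it = datagen.flow(x_train, y_train, subset='validation')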
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
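    # Usage sketch (editor's illustration; 'data/train' is a hypothetical
    # directory tree with one subdirectory per class): the label mapping is
    # exposed on the returned iterator:
    #
    #     it = datagen.flow_from_directory('data/train', target_size=(64, 64))
    #     it.class_indices  # e.g. {'cats': 0, 'dogs': 1}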
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
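    # Usage sketch (editor's illustration): `standardize` can also be called
    # directly on an image array once the generator has been fit. It modifies
    # its argument in place, hence the copy via `astype`:
    #
    #     datagen = ImageDataGenerator(featurewise_center=True)
    #     datagen.fit(x_train)  # computes datagen.mean
    #     x_norm = datagen.standardize(x_train[0].astype('float32'))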
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
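    # Note (editor's illustration): the spatial transforms above are composed
    # into a single 3x3 homography as the matrix product
    # rotation . shift . shear . zoom, then applied once about the image
    # centre. A seeded call is therefore reproducible:
    #
    #     x_aug = datagen.random_transform(x_train[0], seed=42)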
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
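# Usage sketch (editor's illustration): `fit` is only needed when
# `featurewise_center`, `featurewise_std_normalization` or `zca_whitening`
# is enabled, e.g.
#
#     datagen = ImageDataGenerator(zca_whitening=True)
#     datagen.fit(x_train)  # computes mean and principal_components
#     batch_x, batch_y = next(datagen.flow(x_train, y_train, batch_size=32))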
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
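# Subclassing sketch (editor's illustration, hypothetical class name): a
# concrete iterator only has to provide
# `_get_batches_of_transformed_samples`:
#
#     class ArrayIterator(Iterator):
#         def __init__(self, data, batch_size):
#             self.data = np.asarray(data)
#             super(ArrayIterator, self).__init__(len(data), batch_size,
#                                                 shuffle=True, seed=None)
#
#         def _get_batches_of_transformed_samples(self, index_array):
#             return self.data[index_array]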
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
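# Usage sketch (editor's illustration; `extra` is a hypothetical array of the
# same length as `x_train`): the tuple form of `x` routes extra arrays
# through unchanged, so each batch is `([images, extras], labels)`:
#
#     it = NumpyArrayIterator((x_train, extra), y_train, datagen,
#                             batch_size=32)
#     (imgs, extras), labels = next(it)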
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
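# Worked example (editor's illustration): with 10 valid files and
# split=(0.2, 0.6), start = int(0.2 * 10) = 2 and stop = int(0.6 * 10) = 6,
# so the function reports 6 - 2 = 4 files.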
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
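# Worked example (editor's illustration): for directory='dataset/class1'
# containing 'a.jpg' and 'b.jpg', the return value is
# ([class_indices['class1']] * 2, ['class1/a.jpg', 'class1/b.jpg']).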
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
random_brightness
|
Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
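# Usage sketch (editor's illustration; `img` is a hypothetical 3D numpy array
# in channels-last layout, hence the non-default axes):
#
#     rotated = random_rotation(img, 30, row_axis=0, col_axis=1,
#                               channel_axis=2)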
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
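# Usage sketch (editor's illustration): zoom factors are drawn uniformly from
# the given range, so (0.8, 1.2) allows up to 20% in or out:
#
#     zoomed = random_zoom(img, (0.8, 1.2),
#                          row_axis=0, col_axis=1, channel_axis=2)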
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
# MASKED: random_brightness function (lines 202-227)
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
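# Note (editor's illustration): this conjugates `matrix` so that it acts
# about the image centre rather than the origin, i.e. it returns
# offset . matrix . reset with o_x = x / 2 + 0.5 and o_y = y / 2 + 0.5;
# for a 224x224 image both offsets are 112.5.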
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
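# Sanity check (editor's illustration): the identity homography leaves an
# image unchanged, since only `transform_matrix[:2, :2]` (the linear part)
# and `transform_matrix[:2, 2]` (the translation) are passed to ndi:
#
#     np.allclose(apply_transform(img, np.eye(3), channel_axis=2), img)  # True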
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
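# Sanity check (editor's illustration): flipping twice along the same axis
# is the identity:
#
#     np.array_equal(flip_axis(flip_axis(img, 1), 1), img)  # True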
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
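# Round-trip sketch (editor's illustration): `array_to_img` and
# `img_to_array` below are near-inverses, up to dtype and the [0, 255]
# rescaling that `scale=True` applies:
#
#     pil = array_to_img(img, scale=True)
#     arr = img_to_array(pil)  # float array with values in [0, 255]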
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
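# Usage sketch (editor's illustration; 'photo.jpg' is a hypothetical path):
#
#     img = load_img('photo.jpg', target_size=(150, 150),
#                    interpolation='bilinear')
#     x = img_to_array(img)  # shape (150, 150, 3) for channels_last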
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
        shear_range: Float. Shear intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: Function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
    Example of transforming images and masks together:
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, '
                              'which overrides setting of '
                              '`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
                of corresponding labels. If `sample_weight` is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
            shuffle: Whether to shuffle the data (default: True).
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
        while True:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
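# A minimal sketch of a custom `Iterator` subclass (hypothetical names, for
# illustration only): the base class handles shuffling, seeding and batch
# indexing, so a subclass only turns an array of sample indices into a batch.
#
#     class IndexBatchIterator(Iterator):
#         def _get_batches_of_transformed_samples(self, index_array):
#             # `index_array` holds the (possibly shuffled) sample indices
#             # selected for this batch by the base class machinery.
#             return np.asarray(index_array, dtype=K.floatx())
#
#     it = IndexBatchIterator(n=10, batch_size=4, shuffle=True, seed=0)
#     batch = it[0]  # Sequence-style access; len(it) == 3 (rounded up)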
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
        E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
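# Worked example of the `split` arithmetic above: with 50 matching files and
# split=(0.2, 0.6), start, stop = int(0.2 * 50), int(0.6 * 50) = 10, 30,
# so the function reports 20 valid files.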
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
        E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
    # Returns
        Numpy image tensor.
    # Raises
        ValueError: if `brightness_range` isn't a tuple or list of two floats.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    enhancer = ImageEnhance.Brightness(x)
    u = np.random.uniform(brightness_range[0], brightness_range[1])
    x = enhancer.enhance(u)
x = img_to_array(x)
return x
| 202
| 227
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
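# Example (a sketch): rotate a single channels-last image by up to 30 degrees.
# The default axes assume channels-first input, so they are overridden here.
#     img = np.random.rand(64, 64, 3)
#     rotated = random_rotation(img, rg=30.,
#                               row_axis=0, col_axis=1, channel_axis=2)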
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
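# Example (a sketch): shift a channels-first image by up to 10% of its
# width and height, using the default axis arguments.
#     img = np.random.rand(3, 64, 64)
#     shifted = random_shift(img, wrg=0.1, hrg=0.1)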
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
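# Example (a sketch): shear a channels-first image by up to 20 degrees.
#     img = np.random.rand(3, 64, 64)
#     sheared = random_shear(img, intensity=20.)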
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
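# Example (a sketch): randomly zoom a channels-first image by a factor
# drawn between 0.8 and 1.2 independently for each spatial axis.
#     img = np.random.rand(3, 64, 64)
#     zoomed = random_zoom(img, zoom_range=(0.8, 1.2))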
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
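# Example (a sketch): shift each channel by a random offset of up to 20
# intensity units, clipped to the original value range of the image.
#     img = np.random.rand(3, 64, 64) * 255
#     shifted = random_channel_shift(img, intensity=20.)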
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
    # Returns
        Numpy image tensor.
    # Raises
        ValueError: if `brightness_range` isn't a tuple or list of two floats.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    enhancer = ImageEnhance.Brightness(x)
    u = np.random.uniform(brightness_range[0], brightness_range[1])
    x = enhancer.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
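    """Re-centers a homogeneous transform matrix on the image center.
    Translates the origin to the center of an image with `x` rows and `y`
    columns, applies `matrix`, then translates back, so rotations, shears
    and zooms pivot around the center rather than the top-left corner.
    # Arguments
        matrix: 3x3 Numpy array, homogeneous transform matrix.
        x: Integer, image size along the row (height) axis.
        y: Integer, image size along the column (width) axis.
    # Returns
        The offset 3x3 transform matrix.
    """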
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
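# Example (a sketch): compose `apply_transform` with
# `transform_matrix_offset_center` to rotate about the image center.
#     theta = np.deg2rad(10.)
#     rot = np.array([[np.cos(theta), -np.sin(theta), 0],
#                     [np.sin(theta), np.cos(theta), 0],
#                     [0, 0, 1]])
#     img = np.random.rand(3, 64, 64)  # channels-first single image
#     matrix = transform_matrix_offset_center(rot, 64, 64)
#     out = apply_transform(img, matrix, channel_axis=0)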
def flip_axis(x, axis):
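    """Reverses a Numpy array along the given axis.
    # Arguments
        x: Input array.
        axis: Integer, axis along which to flip the array.
    # Returns
        The flipped Numpy array.
    """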
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
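# Round-trip sketch: for a float image, `array_to_img(..., scale=True)`
# rescales values into [0, 255], so converting back with `img_to_array`
# preserves shape and data_format but quantizes values to uint8 precision.
#     x = np.random.rand(32, 32, 3)
#     x2 = img_to_array(array_to_img(x))  # shape (32, 32, 3), values 0-255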
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
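# Example (a sketch, assuming a file 'cat.jpg' exists on disk):
#     img = load_img('cat.jpg', target_size=(150, 150))
#     x = img_to_array(img)  # (150, 150, 3), assuming channels_last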
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
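    """Lists all pictures under `directory`, recursively.
    # Arguments
        directory: Directory to search for pictures.
        ext: A `|`-separated string of allowed file extensions.
    # Returns
        A list of paths to the picture files found.
    """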
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: Function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
            shuffle: Whether to shuffle the data (default: True).
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
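# Illustrative sketch (not part of the original module): `fit` above builds
# the ZCA matrix as U * diag(1/sqrt(s + eps)) * U^T from the SVD of the
# covariance sigma = X^T X / n; applying it to a centered, flattened batch
# decorrelates the pixels. Assumes `flat_x` has shape (samples, pixels).
def _example_zca_whiten(flat_x, principal_components):
    # Same product that `standardize` applies when `zca_whitening` is set.
    return np.dot(flat_x, principal_components)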
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
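# Minimal sketch of a custom Iterator subclass (illustrative only): the base
# class supplies batching, shuffling, locking and the Sequence protocol; a
# subclass only needs to map an index array to concrete samples.
class _ExampleArrayIterator(Iterator):
    def __init__(self, data, batch_size=32, shuffle=False, seed=None):
        self.data = data
        super(_ExampleArrayIterator, self).__init__(
            len(data), batch_size, shuffle, seed)
    def _get_batches_of_transformed_samples(self, index_array):
        # `index_array` is produced by the base class; just gather rows.
        return self.data[index_array]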
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
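# Illustrative usage sketch (`x_train`/`y_train` are hypothetical arrays of
# rank 4 and 1): `ImageDataGenerator.flow` builds this iterator internally;
# each `next` call yields one `(batch_x, batch_y)` tuple.
#
#   it = NumpyArrayIterator(x_train, y_train, ImageDataGenerator(),
#                           batch_size=32, shuffle=True, seed=42)
#   batch_x, batch_y = next(it)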
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
for extension in white_list_formats:
if fname.lower().endswith('.tiff'):
warnings.warn('Using \'.tiff\' files with multiple bands '
'will cause distortion. '
'Please verify your output.')
if fname.lower().endswith('.' + extension):
yield root, fname
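# Illustrative sketch (hypothetical path): materializing the generator gives
# (root, fname) pairs in deterministic sorted order.
#
#   pairs = list(_iter_valid_files('/data/train/cats',
#                                  {'png', 'jpg', 'jpeg'},
#                                  follow_links=False))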
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
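# Worked example of the `split` arithmetic above: with 100 valid files and
# split=(0.6, 1.0), start=60 and stop=100, so 40 files (the last 40%) are
# counted, matching the docstring.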
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
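# Illustrative usage sketch (hypothetical 'data/train' layout with one
# subdirectory per class): a DirectoryIterator is also a Keras `Sequence`,
# so it supports `len()` and indexed access besides plain iteration.
#
#   it = DirectoryIterator('data/train', ImageDataGenerator(rescale=1. / 255),
#                          target_size=(150, 150), class_mode='categorical')
#   num_batches = len(it)        # ceil(samples / batch_size)
#   batch_x, batch_y = it[0]     # deterministic access via __getitem__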
|
apply_transform
|
Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
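# Illustrative usage sketch, assuming a channels-last image `img` of shape
# (height, width, channels): rotate by an angle drawn from +/-40 degrees.
#
#   rotated = random_rotation(img, 40., row_axis=0, col_axis=1,
#                             channel_axis=2)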
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
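# Illustrative usage sketch for a channels-last image `img`: per-axis zoom
# factors are drawn from [0.8, 1.2]. The matrix scales coordinates, so
# factors > 1 shrink the visible content and factors < 1 enlarge it.
#
#   zoomed = random_zoom(img, (0.8, 1.2), row_axis=0, col_axis=1,
#                        channel_axis=2)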
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
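# Illustrative usage sketch (requires PIL): the sampled enhancement factor
# behaves like PIL's Brightness, where 0.0 is black, 1.0 the original image
# and values above 1.0 are brighter.
#
#   brightened = random_brightness(img, (0.5, 1.5))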
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
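# Illustrative sketch (not part of the original module): a bare rotation
# matrix pivots around the array origin (an image corner); offsetting moves
# the pivot to the image center before the transform is applied.
def _example_centered_rotation(h=100, w=100, degrees=45.):
    theta = np.deg2rad(degrees)
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])
    return transform_matrix_offset_center(rotation, h, w)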
# MASKED: apply_transform function (lines 239-271)
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
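# Illustrative sketch: for a channels-last image, flipping the column axis
# mirrors the image horizontally, equivalent to `img[:, ::-1, :]`.
#
#   flipped = flip_axis(img, axis=1)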
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
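# Illustrative round-trip sketch (requires PIL; `pil_img` is hypothetical):
# `img_to_array` and `array_to_img` invert each other up to dtype and the
# optional rescaling to [0, 255].
#
#   arr = img_to_array(pil_img)                # float array, rank 3
#   pil_again = array_to_img(arr, scale=True)  # values mapped into [0, 255]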
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
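# Illustrative usage sketch (hypothetical path): load and resize an image
# with bilinear resampling, then convert it for the array utilities above.
#
#   img = load_img('data/train/cats/cat001.jpg',
#                  target_size=(150, 150), interpolation='bilinear')
#   x = img_to_array(img)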
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
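# Illustrative sketch (hypothetical directory): recursively collect image
# paths filtered by extension, e.g. PNG files only.
#
#   paths = list_pictures('data/train', ext='png')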
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
            of corresponding labels. If `sample_weight` is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
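# Worked example of the batch arithmetic above: with n=10 and batch_size=3,
# len(iterator) == (10 + 3 - 1) // 3 == 4, and iterator[3] slices
# index_array[9:12], yielding the final partial batch of a single sample
# (NumPy slicing past the end of the array is safe).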
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
        If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
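# Illustrative usage sketch (assumed arrays `x_train`, `y_train`): a
# `NumpyArrayIterator` is normally created through `ImageDataGenerator.flow`:
#
#     datagen = ImageDataGenerator(horizontal_flip=True)
#     for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
#         ...  # the iterator loops indefinitely, so break by hand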
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            # Warn once per '.tiff' file, not once per whitelisted extension.
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
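# Worked example: with 10 valid files and split=(0.6, 1.0), start=6 and
# stop=10, so 4 files are counted -- the last 40 percent of the directory,
# mirroring the slicing in `_list_valid_filenames_in_directory` below.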
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
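# Illustrative result (hypothetical paths): for directory "dataset/class1"
# holding "file1.jpg" and "file2.jpg", with class_indices={'class1': 0}, this
# returns ([0, 0], ['class1/file1.jpg', 'class1/file2.jpg']).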
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
| 239
| 271
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
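# Illustrative usage (not part of the original module): rotating a dummy
# channels-first image by up to 30 degrees in either direction:
#
#     img = np.random.rand(3, 64, 64)
#     rotated = random_rotation(img, rg=30., row_axis=1, col_axis=2,
#                               channel_axis=0)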
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
        raise ValueError('`zoom_range` should be a tuple or list of two '
                         'floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
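# Illustrative usage: zoom factors drawn from (0.8, 1.2). Because the matrix
# maps output coordinates back into the input, factors below 1 enlarge the
# image content and factors above 1 shrink it:
#
#     zoomed = random_zoom(np.random.rand(3, 64, 64), zoom_range=(0.8, 1.2))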
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
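# Illustrative usage (requires PIL; assumes the backend's image_data_format is
# 'channels_last'): enhancement factors below 1 darken the image, factors
# above 1 brighten it:
#
#     bright = random_brightness(np.random.rand(64, 64, 3) * 255,
#                                brightness_range=(0.5, 1.5))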
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
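# Worked example: the helper returns O . T . R, where O translates the origin
# to the image center (o_x, o_y) and R translates it back, so that a rotation
# or zoom T pivots about the center instead of the top-left corner:
#
#     theta = np.deg2rad(45.)
#     rot = np.array([[np.cos(theta), -np.sin(theta), 0],
#                     [np.sin(theta), np.cos(theta), 0],
#                     [0, 0, 1]])
#     centered = transform_matrix_offset_center(rot, 64, 64)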
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
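# Illustrative usage, combining the two helpers above on a channels-first
# image. Note scipy's affine_transform treats the matrix as the inverse
# mapping (output coordinates -> input coordinates):
#
#     img = np.random.rand(3, 64, 64)
#     matrix = transform_matrix_offset_center(rot, 64, 64)  # `rot` as above
#     warped = apply_transform(img, matrix, channel_axis=0)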
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
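# Note: `flip_axis(x, axis)` reverses `x` along `axis` (equivalent to
# `np.flip(x, axis)`); e.g. flip_axis(img, 2) mirrors a channels-first image
# horizontally by reversing its columns.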
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
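# Illustrative round trip (requires PIL; assumes image_data_format is
# 'channels_last'). Shapes are preserved, but with scale=True the values are
# rescaled into [0, 255] on the way in:
#
#     arr = np.random.rand(32, 32, 3) * 255
#     img = array_to_img(arr)    # float array -> PIL Image
#     back = img_to_array(img)   # PIL Image -> floatx array, shape (32, 32, 3)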
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
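# Illustrative usage (hypothetical path): load and resize with bilinear
# resampling, then convert for the generator pipeline:
#
#     img = load_img('data/train/cats/cat001.jpg',
#                    target_size=(150, 150), interpolation='bilinear')
#     x = img_to_array(img)  # shape (150, 150, 3) for channels_last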
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
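# Illustrative usage (hypothetical directory): collect all picture paths under
# a tree. Matching is case-sensitive and by extension:
#
#     paths = list_pictures('data/train', ext='jpg|png')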
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
        If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
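# Usage sketch (illustrative): NumpyArrayIterator is normally constructed via
# `ImageDataGenerator.flow(x, y)`, but it can be driven directly. Assuming
# `x` is a rank-4 float array and `y` a matching label array:
#
#     gen = ImageDataGenerator(horizontal_flip=True)
#     it = NumpyArrayIterator(x, y, gen, batch_size=8, shuffle=True, seed=0)
#     batch_x, batch_y = it[0]        # Sequence-style indexed access
#     batch_x, batch_y = next(it)     # generator-style access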
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
    for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            # Warn once per file, not once per whitelisted extension.
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
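# Example (illustrative): for a hypothetical tree containing
# `photos/cats/a.jpg` and `photos/cats/b.png`,
# `_iter_valid_files('photos/cats', {'jpg', 'png'}, False)` yields
# `('photos/cats', 'a.jpg')` then `('photos/cats', 'b.png')`, sorted first
# by directory (the os.walk root) and then by filename.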
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last
            40 percent of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
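# Worked example (illustrative): with 10 valid files and split=(0.2, 0.6),
# start, stop = int(0.2 * 10), int(0.6 * 10) = 2, 6, so the reported count
# is stop - start = 4 files, i.e. the middle 40% of the sorted listing.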
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last
            40 percent of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
array_to_img
|
Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
        data_format: Image data format,
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
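# Usage sketch (illustrative): rotate a single channels-first image by a
# random angle in [-30, 30] degrees. The array below is a stand-in input.
#
#     import numpy as np
#     img = np.random.random((3, 64, 64))          # (channels, rows, cols)
#     out = random_rotation(img, rg=30.,
#                           row_axis=1, col_axis=2, channel_axis=0)
#     assert out.shape == img.shape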
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple or list of two floats.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
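# Worked example (illustrative): for a 4x4 image, o_x = o_y = 4 / 2 + 0.5
# = 2.5, and the returned matrix is T(o) . M . T(-o), i.e. the conjugation
# that makes `matrix` act about the image centre instead of the top-left
# corner. With M the identity, the offset and reset matrices cancel exactly.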
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
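# Example (illustrative): flip_axis reverses an array along one axis by
# swapping that axis to the front, reversing, and swapping back.
#
#     >>> import numpy as np
#     >>> flip_axis(np.arange(6).reshape((2, 3)), axis=1)
#     array([[2, 1, 0],
#            [5, 4, 3]])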
# MASKED: array_to_img function (lines 281-329)
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
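# Usage sketch (illustrative, requires PIL): round-trip a PIL image through
# img_to_array. With data_format='channels_last' a 32x16 RGB image becomes
# a (32, 16, 3) float array (height, width, channels).
#
#     from PIL import Image
#     pil = Image.new('RGB', (16, 32))              # PIL size is (w, h)
#     arr = img_to_array(pil, data_format='channels_last')
#     assert arr.shape == (32, 16, 3)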
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
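# Usage sketch (illustrative; the path is a placeholder): load an image file
# and resize it to (height, width) = (150, 150) with bilinear resampling.
#
#     img = load_img('some_image.jpg', target_size=(150, 150),
#                    interpolation='bilinear')
#     x = img_to_array(img)                         # shape (150, 150, 3)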
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
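    # Order-of-operations sketch (illustrative, not part of the original
    # module): standardize applies, in sequence, preprocessing_function,
    # rescale, samplewise centering/normalization, then the featurewise
    # statistics learned by fit(). For example, assuming a float input:
    #
    #     gen = ImageDataGenerator(rescale=1. / 255, samplewise_center=True)
    #     x = np.random.random((32, 32, 3)) * 255
    #     out = gen.standardize(x)     # out.mean() is ~0; note that the
    #                                  # in-place ops also mutate x itself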
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
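    # Composition sketch (illustrative): random_transform draws theta,
    # (tx, ty), shear and (zx, zy) once, then chains the corresponding 3x3
    # homographies with np.dot in the fixed order
    #
    #     transform = rotation . shift . shear . zoom
    #
    # recenters the result with transform_matrix_offset_center, and resamples
    # the image in a single apply_transform pass, so each pixel is
    # interpolated once rather than once per elementary transformation.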
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
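# ZCA sketch (illustrative, not part of the original module): fit() builds
# the whitening matrix W = U . diag(1 / sqrt(s + eps)) . U^T from the SVD of
# sigma = X^T X / N, and standardize() applies it as x_flat . W. A toy check
# that W decorrelates the flattened samples:
#
#     import numpy as np
#     from scipy import linalg
#     X = np.random.random((100, 12))               # N=100 flat samples
#     sigma = np.dot(X.T, X) / X.shape[0]
#     u, s, _ = linalg.svd(sigma)
#     W = (u * (1. / np.sqrt(s[np.newaxis] + 1e-6))).dot(u.T)
#     white = X.dot(W)
#     assert np.allclose(np.dot(white.T, white) / X.shape[0],
#                        np.eye(12), atol=1e-3)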
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
    for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            # Warn once per file, not once per whitelisted extension.
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last
            40 percent of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
| 281
| 329
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
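    # Example
    A minimal, illustrative sketch (the random image is arbitrary; the
    shape follows the default `channel_axis=0`, `row_axis=1`, `col_axis=2`):
    ```python
    import numpy as np
    img = np.random.random((3, 32, 32))  # (channels, rows, cols)
    rotated = random_rotation(img, rg=45.)  # rotate by up to 45 degrees
    assert rotated.shape == img.shape
    ```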
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
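    # Example
    A minimal, illustrative sketch (`wrg` and `hrg` are fractions of the
    width and height, respectively):
    ```python
    import numpy as np
    img = np.random.random((3, 32, 32))  # (channels, rows, cols)
    # shift by at most 20% of the width and 30% of the height
    shifted = random_shift(img, wrg=0.2, hrg=0.3)
    ```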
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
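    # Example
    A minimal, illustrative sketch (`zx` and `zy` are drawn independently
    from `zoom_range`; `(1, 1)` means no zoom):
    ```python
    import numpy as np
    img = np.random.random((3, 32, 32))  # (channels, rows, cols)
    zoomed = random_zoom(img, zoom_range=(0.8, 1.2))
    ```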
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
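    # Example
    A minimal, illustrative sketch (shifted values are clipped to the
    min/max of the original input):
    ```python
    import numpy as np
    img = np.random.random((3, 32, 32))  # (channels, rows, cols)
    shifted = random_channel_shift(img, intensity=0.1)
    ```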
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
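    # Example
    A minimal, illustrative sketch (requires PIL and assumes the Keras
    default `channels_last` data format; a factor below 1 darkens the
    image, above 1 brightens it):
    ```python
    import numpy as np
    img = np.random.random((32, 32, 3)) * 255
    brightened = random_brightness(img, brightness_range=(0.5, 1.5))
    ```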
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
def transform_matrix_offset_center(matrix, x, y):
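    """Shifts a 3x3 homogeneous transform so it acts about the image center.

    The transform is conjugated with a pair of translations,
    `offset . matrix . reset`: `offset` moves the origin to the image
    center and `reset` moves it back, so rotations, shears and zooms
    pivot around the center instead of the top-left corner.

    # Arguments
        matrix: 3x3 Numpy array, homogeneous transform matrix.
        x: Size of the image along the row axis (height).
        y: Size of the image along the column axis (width).

    # Returns
        The centered transform matrix.
    """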
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
        x: 3D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
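    """Flips a Numpy array along the given axis.

    # Arguments
        x: Numpy array.
        axis: Index of the axis to flip.

    # Returns
        The flipped array.
    """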
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
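    # Example
    A minimal, illustrative sketch (with `scale=True` the values are
    mapped onto `[0, 255]` before conversion):
    ```python
    import numpy as np
    x = np.random.random((32, 32, 3))  # channels_last
    img = array_to_img(x, data_format='channels_last', scale=True)
    ```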
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
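    # Example
    A minimal, illustrative sketch (`'photo.jpg'` is a hypothetical path):
    ```python
    img = load_img('photo.jpg', target_size=(150, 150))
    x = img_to_array(img)  # shape (150, 150, 3) for "channels_last"
    ```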
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
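    """Lists the paths of pictures under `directory`, searched recursively.

    # Arguments
        directory: Path of the directory to search.
        ext: Regex alternation of accepted file extensions.

    # Returns
        A list of paths of files whose names match one of the extensions.
    """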
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, '
                              'which overrides setting of '
                              '`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
save_to_dir=None, save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
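        # Example
        A minimal, illustrative sketch (note that `standardize` modifies
        its input in place; pass a copy if the original batch must be
        preserved):
        ```python
        import numpy as np
        datagen = ImageDataGenerator(rescale=1. / 255)
        x = np.random.random((32, 32, 3)) * 255
        x_norm = datagen.standardize(np.copy(x))
        ```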
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
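        # Example
        A minimal, illustrative sketch (the same seed yields the same
        transform, e.g. for augmenting images and masks identically):
        ```python
        import numpy as np
        datagen = ImageDataGenerator(rotation_range=90.)
        img = np.random.random((32, 32, 3))  # channels_last by default
        a = datagen.random_transform(np.copy(img), seed=42)
        b = datagen.random_transform(np.copy(img), seed=42)
        assert np.allclose(a, b)
        ```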
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
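        # Example
        A minimal, illustrative sketch (random data stands in for real
        images; after `fit`, the `mean` and `std` attributes are set):
        ```python
        import numpy as np
        datagen = ImageDataGenerator(featurewise_center=True,
                                     featurewise_std_normalization=True)
        x_train = np.random.random((100, 32, 32, 3))
        datagen.fit(x_train)
        batch = datagen.standardize(np.copy(x_train[:8]))
        ```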
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            # Warn once per file rather than once per candidate extension.
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
img_to_array
|
Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
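# Illustrative sketch (not part of the original module; example shapes
# only): rotating a single channels-first image by up to 40 degrees.
#
#   img = np.random.rand(3, 224, 224)          # (channels, rows, cols)
#   rotated = random_rotation(img, rg=40, row_axis=1, col_axis=2,
#                             channel_axis=0, fill_mode='nearest')
#   assert rotated.shape == img.shape          # shape is preserved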
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
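# Illustrative sketch (example values only): the zoom factors zx and zy
# are drawn independently from `zoom_range`, so the aspect ratio is
# generally not preserved even though the canvas size is.
#
#   img = np.random.rand(3, 64, 64)
#   zoomed = random_zoom(img, zoom_range=(0.8, 1.2))
#   assert zoomed.shape == img.shape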
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
    img = array_to_img(x)
    brightness_enhancer = ImageEnhance.Brightness(img)
    u = np.random.uniform(brightness_range[0], brightness_range[1])
    img = brightness_enhancer.enhance(u)
    x = img_to_array(img)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
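# The helper above recenters an affine transform: for an image of size
# (x, y) it returns T = O . M . R, where O translates the origin to the
# image center (o_x, o_y) and R translates it back, so rotations, shears
# and zooms act about the center rather than the top-left corner.
# A minimal sanity check (example values only):
#
#   t = transform_matrix_offset_center(np.eye(3), 100, 200)
#   assert np.allclose(t, np.eye(3))           # identity stays identity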
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
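# Example (not part of the original module): `flip_axis` reverses the
# tensor along the given axis, so flipping axis 2 of a channels-first
# image mirrors it horizontally.
#
#   img = np.arange(12).reshape(1, 3, 4)
#   flipped = flip_axis(img, axis=2)
#   assert (flipped[..., ::-1] == img).all()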
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
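# Round-trip sketch (example values; requires PIL and assumes the default
# 'channels_last' data format):
#
#   arr = np.random.rand(32, 32, 3) * 255      # float array, channels last
#   img = array_to_img(arr, data_format='channels_last', scale=True)
#   back = img_to_array(img, data_format='channels_last')
#   assert back.shape == (32, 32, 3)           # shape kept, uint8 precision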
# MASKED: img_to_array function (lines 332-364)
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
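# Usage sketch (hypothetical path, not part of the original module):
#
#   img = load_img('photos/cat.jpg', target_size=(150, 150),
#                  interpolation='bilinear')
#   print(img.size)                             # (150, 150) as (width, height)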
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
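# Note: `re.match` above only anchors at the start of the filename, so a
# name such as 'photo.jpg.bak' still matches the 'jpg' alternative.
# Appending '$' to the pattern would restrict matches to true extensions;
# the permissive behavior is kept here unchanged.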
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
    def flow(self, x, y=None, batch_size=32, shuffle=True,
             sample_weight=None, seed=None, save_to_dir=None,
             save_prefix='', save_format='png', subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rbg". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
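    # Order of operations in `standardize` (sketch, example values only):
    # preprocessing_function -> rescale -> samplewise centering/scaling ->
    # featurewise centering/scaling -> ZCA whitening. For instance:
    #
    #   gen = ImageDataGenerator(rescale=1. / 255, samplewise_center=True)
    #   out = gen.standardize(np.random.rand(64, 64, 3) * 255)
    #   assert abs(out.mean()) < 1e-4           # centered after rescaling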
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
            transform_matrix = (shift_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shift_matrix))
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
            transform_matrix = (shear_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shear_matrix))
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
            transform_matrix = (zoom_matrix if transform_matrix is None
                                else np.dot(transform_matrix, zoom_matrix))
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
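    # The final transform above is the homography product
    # rotation . shift . shear . zoom (3x3 matrices), offset to the image
    # center, so a single interpolation pass is applied per image no
    # matter how many augmentations are enabled.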
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
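# The ZCA branch above computes W = U . diag(1 / sqrt(s + eps)) . U^T from
# the SVD of the per-feature covariance sigma; `standardize` later applies
# W to each flattened sample. Typical pairing with `flow` (example data):
#
#   datagen = ImageDataGenerator(zca_whitening=True)
#   x_train = np.random.rand(100, 32, 32, 3)
#   datagen.fit(x_train)                        # fills principal_components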
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
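# Minimal sketch of a custom iterator (assumed in-memory data source, not
# part of the original module). A subclass only needs to map an index
# array to a batch; shuffling, locking and epoch bookkeeping are inherited
# from `Iterator`.
#
#   class ArrayIterator(Iterator):
#       def __init__(self, data, batch_size=32, shuffle=True, seed=None):
#           self.data = data
#           super(ArrayIterator, self).__init__(len(data), batch_size,
#                                               shuffle, seed)
#       def _get_batches_of_transformed_samples(self, index_array):
#           return self.data[index_array]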
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
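# Worked example (hypothetical counts): with 10 valid files and
# split=(0.2, 0.6), start = int(0.2 * 10) = 2 and stop = int(0.6 * 10) = 6,
# so the function reports 6 - 2 = 4 files.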
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
        filenames: the paths of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
| 332
| 364
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.deg2rad(np.random.uniform(-rg, rg))
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
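# Illustrative usage sketch (not part of the original module): rotating a
# single channels_last image, so row/col/channel axes are 0/1/2 rather than
# the channels_first defaults in the signature above.
def _example_random_rotation():
    img = np.random.random((64, 64, 3))  # (height, width, channels)
    return random_rotation(img, rg=30.,
                           row_axis=0, col_axis=1, channel_axis=2)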
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.deg2rad(np.random.uniform(-intensity, intensity))
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
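# Illustrative usage sketch (not part of the original module): zooming a
# channels_last image with per-axis factors drawn from [0.8, 1.2].
def _example_random_zoom():
    img = np.random.random((64, 64, 3))
    return random_zoom(img, (0.8, 1.2),
                       row_axis=0, col_axis=1, channel_axis=2)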
def random_channel_shift(x, intensity, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + np.random.uniform(-intensity, intensity),
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_brightness(x, brightness_range):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
    # Returns
        Numpy image tensor.
    # Raises
        ValueError: if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be a tuple or list of two floats. '
'Received: %s' % brightness_range)
x = array_to_img(x)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x
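# Illustrative usage sketch (not part of the original module): shifting
# brightness by a factor drawn from [0.8, 1.2]. Requires PIL, since the
# function round-trips through `array_to_img`/`ImageEnhance`, and assumes
# a channels_last backend configuration.
def _example_random_brightness():
    img = np.random.random((32, 32, 3)) * 255.
    return random_brightness(img, (0.8, 1.2))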
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
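# Illustrative sketch (not part of the original module): the offset/reset
# pair above conjugates `matrix` so it pivots about the image centre,
# i.e. T(o) . M . T(-o) with o = (x / 2 + 0.5, y / 2 + 0.5).
def _example_offset_center():
    theta = np.deg2rad(45.)
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])
    return transform_matrix_offset_center(rotation, 64, 64)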
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
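# Illustrative usage sketch (not part of the original module): applying a
# hand-built horizontal shear matrix to a channels_first image.
def _example_apply_transform():
    img = np.random.random((3, 32, 32))  # (channels, height, width)
    shear = np.array([[1., 0.3, 0.],
                      [0., 1., 0.],
                      [0., 0., 1.]])
    return apply_transform(img, shear, channel_axis=0)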
def flip_axis(x, axis):
    """Flips a Numpy array along the given axis (used for random flips)."""
    x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
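# Illustrative sketch (not part of the original module): round-tripping a
# channels_last array through `array_to_img` and back. Requires PIL.
def _example_array_roundtrip():
    x = np.random.random((16, 16, 3)) * 255.
    img = array_to_img(x, data_format='channels_last')
    return img_to_array(img, data_format='channels_last')  # (16, 16, 3)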
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `load_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
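# Illustrative usage sketch (not part of the original module); the path
# below is hypothetical.
def _example_load_img():
    img = load_img('data/train/cats/cat.0.jpg',  # hypothetical file
                   target_size=(150, 150), interpolation='bilinear')
    return img_to_array(img)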
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied to each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.brightness_range = brightness_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
                'Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % zoom_range)
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, '
                              'which overrides setting of '
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
    def flow(self, x, y=None, batch_size=32, shuffle=True,
             sample_weight=None, seed=None,
             save_to_dir=None, save_prefix='', save_format='png',
             subset=None):
"""Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
"""
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def standardize(self, x):
"""Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# Use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.deg2rad(np.random.uniform(
-self.rotation_range,
self.rotation_range))
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.deg2rad(np.random.uniform(
-self.shear_range,
self.shear_range))
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(
self.zoom_range[0],
self.zoom_range[1],
2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
            transform_matrix = (shift_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shift_matrix))
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
            transform_matrix = (shear_matrix if transform_matrix is None
                                else np.dot(transform_matrix, shear_matrix))
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
            transform_matrix = (zoom_matrix if transform_matrix is None
                                else np.dot(transform_matrix, zoom_matrix))
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
if self.brightness_range is not None:
x = random_brightness(x, self.brightness_range)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(
x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
self.principal_components = (u * s_inv).dot(u.T)
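# Illustrative usage sketch (not part of the original module): fitting on
# random data so `standardize` has featurewise statistics to work with.
# Assumes a channels_last backend configuration.
def _example_fit_statistics():
    datagen = ImageDataGenerator(featurewise_center=True,
                                 featurewise_std_normalization=True)
    x_train = np.random.random((100, 32, 32, 3))  # (samples, h, w, channels)
    datagen.fit(x_train)
    return datagen.standardize(x_train[:1].copy())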
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
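# Illustrative sketch (not part of the original module): the single hook an
# `Iterator` subclass must implement; a real subclass would load and
# transform the samples selected by `index_array`.
class _IdentityIterator(Iterator):
    def _get_batches_of_transformed_samples(self, index_array):
        return index_array
# e.g. _IdentityIterator(10, batch_size=4, shuffle=False, seed=None)[0]
# yields array([0, 1, 2, 3]).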
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, sample_weight=None,
seed=None, data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
subset=None):
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3 or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(
x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
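# Illustrative usage sketch (not part of the original module): driving a
# small in-memory dataset through `ImageDataGenerator.flow`, which builds a
# `NumpyArrayIterator` under the hood.
def _example_flow_batches():
    x = np.random.random((10, 8, 8, 3))
    y = np.arange(10)
    it = ImageDataGenerator().flow(x, y, batch_size=4, shuffle=False)
    batch_x, batch_y = next(it)
    return batch_x.shape, batch_y  # ((4, 8, 8, 3), array([0, 1, 2, 3]))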
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            if fname.lower().endswith('.tiff'):
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            for extension in white_list_formats:
                if fname.lower().endswith('.' + extension):
                    yield root, fname
def _count_valid_files_in_directory(directory,
white_list_formats,
split,
follow_links):
"""Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
"""
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
if split:
start, stop = int(split[0] * num_files), int(split[1] * num_files)
else:
start, stop = 0, num_files
return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
num_files = len(list(
_iter_valid_files(directory, white_list_formats, follow_links)))
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = list(
_iter_valid_files(
directory, white_list_formats, follow_links))[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
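# Illustrative sketch (not part of the original module): how a `split`
# tuple selects a contiguous slice of the sorted file listing.
def _example_split_slice():
    num_files = 10
    split = (0.2, 0.6)
    start, stop = int(split[0] * num_files), int(split[1] * num_files)
    return list(range(num_files))[start:stop]  # [2, 3, 4, 5]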
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: ', subset,
'; expected "training" or "validation"')
else:
split = None
self.subset = subset
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
'ppm', 'tif', 'tiff'}
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links,
split=split)
self.samples = sum(pool.map(function_partial,
(os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, white_list_formats, split,
self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
(len(index_array),) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size,
interpolation=self.interpolation)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros(
(len(batch_x), self.num_classes),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
|